From 78fb94c21ec39ee21f4c54f6980eec8d76a75f1f Mon Sep 17 00:00:00 2001
From: Daniel Schmidt
Date: Fri, 12 Jan 2024 16:21:19 +0100
Subject: [PATCH] chore: split up resource documentation

---
 website/data/cdktf-nav-data.json              |   25 +-
 website/docs/cdktf/concepts/resources.mdx     | 1178 -----------------
 website/docs/cdktf/resources/basics.mdx       |  397 ++++++
 website/docs/cdktf/resources/escape-hatch.mdx |  545 ++++++++
 website/docs/cdktf/resources/importing.mdx    |  234 ++++
 website/docs/cdktf/resources/refactoring.mdx  |   21 +
 6 files changed, 1218 insertions(+), 1182 deletions(-)
 delete mode 100644 website/docs/cdktf/concepts/resources.mdx
 create mode 100644 website/docs/cdktf/resources/basics.mdx
 create mode 100644 website/docs/cdktf/resources/escape-hatch.mdx
 create mode 100644 website/docs/cdktf/resources/importing.mdx
 create mode 100644 website/docs/cdktf/resources/refactoring.mdx

diff --git a/website/data/cdktf-nav-data.json b/website/data/cdktf-nav-data.json
index b4701e49af..53097e51e3 100644
--- a/website/data/cdktf-nav-data.json
+++ b/website/data/cdktf-nav-data.json
@@ -10,6 +10,27 @@
       "title": "Get Started",
       "href": "https://learn.hashicorp.com/tutorials/terraform/cdktf-install?in=terraform/cdktf"
     },
+    {
+      "title": "Resources",
+      "routes": [
+        {
+          "title": "Basics",
+          "path": "resources/basics"
+        },
+        {
+          "title": "Importing",
+          "path": "resources/importing"
+        },
+        {
+          "title": "Refactoring",
+          "path": "resources/refactoring"
+        },
+        {
+          "title": "Escape Hatches",
+          "path": "resources/escape-hatch"
+        }
+      ]
+    },
     {
       "title": "Concepts",
       "routes": [
@@ -29,10 +50,6 @@
           "title": "Providers",
           "path": "concepts/providers"
         },
-        {
-          "title": "Resources",
-          "path": "concepts/resources"
-        },
         {
           "title": "Modules",
           "path": "concepts/modules"
diff --git a/website/docs/cdktf/concepts/resources.mdx b/website/docs/cdktf/concepts/resources.mdx
deleted file mode 100644
index f962011d88..0000000000
--- a/website/docs/cdktf/concepts/resources.mdx
+++ /dev/null
@@ -1,1178 +0,0 @@
----
-page_title: Resources - CDK for Terraform
-description: >-
-  Resources describe one or more infrastructure objects, like virtual networks, compute instances, and DNS records. Define resources in a CDK for Terraform application.
----
-
-# Resources
-
-Resources are the most important element when defining infrastructure in CDKTF applications. Each resource describes one or more infrastructure objects, such as virtual networks, compute instances, or higher-level components such as DNS records.
-
-In your CDK for Terraform (CDKTF) application, you will use your preferred programming language to define the resources you want Terraform to manage on one or more [providers](/terraform/cdktf/concepts/providers). This page explains how to use resources in your application and how to use [escape hatches](#escape-hatch) to change resource behavior when necessary.
-
-## Define Resources
-
-Resource definitions and properties vary depending on the type of resource and the provider. Consult your provider's documentation for a full list of available resources and their configuration options.
-
-The following example defines a [DynamoDB table](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dynamodb_table) resource on the AWS provider.
- - - - - - - - - -```ts -export class ResourcesStack extends TerraformStack { - constructor(scope: Construct, id: string) { - super(scope, id); - - new AwsProvider(this, "aws", { - region: "eu-central-1", - }); - - const region = new DataAwsRegion(this, "region"); - - new DynamodbTable(this, "first-table", { - name: `my-first-table-${region.name}`, - hashKey: "temp", - attribute: [{ name: "id", type: "S" }], - billingMode: "PAY_PER_REQUEST", - }); - } -} -``` - -```java -import software.constructs.Construct; -import com.hashicorp.cdktf.TerraformStack; -import imports.aws.dynamodb_table.DynamodbTable; -import imports.aws.dynamodb_table.DynamodbTableAttribute; -import imports.aws.dynamodb_table.DynamodbTableConfig; - - -import java.util.Arrays; - -public class MainResourcesDefine extends TerraformStack { - - public MainResourcesDefine(Construct scope, String id){ - super(scope, id); - - new AwsProvider(this, "aws", AwsProviderConfig.builder() - .region("us-east-1") - .build() - ); - - DataAwsRegion region = new DataAwsRegion(this, "region"); - - new DynamodbTable(this, "hello", DynamodbTableConfig.builder() - .name("my-first-table-"+region.getName()) - .hashKey("temp") - .attribute(Arrays.asList( - DynamodbTableAttribute.builder() - .name("id") - .type("S") - .build() - ) - ) - .billingMode("PAY_PER_REQUEST") - .build() - ); - } -} -``` - -```python -from constructs import Construct -from cdktf import TerraformStack - -class ResourceStack(TerraformStack): - def __init__(self, scope: Construct, id: str): - super().__init__(scope, id) - - AwsProvider(self, "aws", - region="us-east-1" - ) - - region = DataAwsRegion(self, "region") - - DynamodbTable(self, "Hello", - name="my-first-table-{}".format(region.name), - hash_key="temp", - attribute=[{"name": "id", "type": "S"}], - billing_mode="PAY_PER_REQUEST" - ) -``` - -```csharp - DataAwsRegion region = new DataAwsRegion(this, "region"); - - new DynamodbTable(this, "first-table", new DynamodbTableConfig - { - Name = $"my-first-table-{region.Name}", - HashKey = "temp", - Attribute = new DynamodbTableAttribute[] { - new DynamodbTableAttribute { - Name = "id", - Type = "S" - } - }, - BillingMode = "PAY_PER_REQUEST" - }); -``` - -```go -import ( - "fmt" - - "github.com/aws/constructs-go/constructs/v10" - "github.com/aws/jsii-runtime-go" - "github.com/hashicorp/terraform-cdk-go/cdktf" - "github.com/hashicorp/terraform-cdk/examples/go/documentation/generated/hashicorp/aws/dataawsregion" - "github.com/hashicorp/terraform-cdk/examples/go/documentation/generated/hashicorp/aws/dynamodbtable" - aws "github.com/hashicorp/terraform-cdk/examples/go/documentation/generated/hashicorp/aws/provider" - -) - -func NewResourcesStack(scope constructs.Construct, name string) cdktf.TerraformStack { - stack := cdktf.NewTerraformStack(scope, &name) - - aws.NewAwsProvider(stack, jsii.String("aws"), &aws.AwsProviderConfig{ - Region: jsii.String("eu-central-1"), - }) - - region := dataawsregion.NewDataAwsRegion(stack, jsii.String("region"), &dataawsregion.DataAwsRegionConfig{}) - - dynamodbtable.NewDynamodbTable(stack, jsii.String("first-table"), &dynamodbtable.DynamodbTableConfig{ - Name: jsii.String(fmt.Sprintf("my-first-table-%s", *region.Name())), - HashKey: jsii.String("id"), - Attribute: []map[string]string{ - {"name": "id", "type": "S"}, - }, - BillingMode: jsii.String("PAY_PER_REQUEST"), - }) - - - return stack -} - -``` - - - -The [examples page](/terraform/cdktf/examples-and-guides/examples) contains multiple example projects for every supported programming 
language. - -## Scope - -You can instantiate the same resource multiple times throughout your infrastructure. For example, you may want to create multiple S3 Buckets with different configurations. Instances that share the same parent element are considered to be part of the same scope. You must set a different `name` property for each instance to avoid naming conflicts. - -Refer to the [constructs documentation](/terraform/cdktf/concepts/constructs#scope) for more details and an example. - -## References - -You can reference resource properties throughout your configuration. For example, you may want to use the name of a parent resource when assigning names to related child resources. Refer to your provider's documentation for a full list of available properties for each resource type. - -To create references, call `myResource.` on the resource instance. For example, you could use `myResource.name` to retrieve the `name` property from `myResource`. Terraform does not support passing an entire block (e.g. `exampleNamespace.metadata`) into a resource or data source, so you must create a reference for each individual property. - -References are also useful when you need to track logical dependencies. For example, Kubernetes resources live in a namespace, so a namespace must exist before Terraform can provision the associated resources. The following example uses a reference for the namespace property in the the deployment. This reference tells Terraform that it needs to create the namespace before creating the resources. - - - - - - - - - -```ts -const exampleNamespace = new Namespace(this, "tf-cdk-example", { - metadata: { - name: "tf-cdk-example", - }, -}); - -new Deployment(this, "nginx-deployment", { - metadata: { - name: "nginx", - namespace: exampleNamespace.metadata.name, // Reference the namespace name propery - labels: { - app: "my-app", - }, - }, - spec: { - // ... 
- }, -}); -``` - -```java -import imports.kubernetes.namespace.Namespace; -import imports.kubernetes.namespace.NamespaceConfig; -import imports.kubernetes.namespace.NamespaceMetadata; - Namespace exampleNamespace = new Namespace(this, "tf-cdk-example", NamespaceConfig.builder() - .metadata(NamespaceMetadata.builder() - .name("tf-cdk-example") - .build()) - .build()); - - new Deployment(this, "nginx-deployment", DeploymentConfig.builder() - .metadata(DeploymentMetadata.builder() - .name("nginx") - .namespace(exampleNamespace.getMetadata().getName()) // Reference the name property - .labels(new HashMap() { - { - put("app", app.toString()); - } - }) - .build()) - .spec(DeploymentSpec.builder() - .selector(DeploymentSpecSelector.builder() - .matchLabels(new HashMap(){{ - put("app", app.toString()); - }}) - .build() - ) - .replicas("1") - .template(DeploymentSpecTemplate.builder() - .metadata(DeploymentSpecTemplateMetadata.builder() - .labels(new HashMap(){{ - put("app", app.toString()); - }}) - .build() - ) - .spec(DeploymentSpecTemplateSpec.builder() - .container(Arrays.asList(DeploymentSpecTemplateSpecContainer.builder() - .image("nginx:1.7.9") - .name("nginx") - .build() - ) - ) - .build() - ) - .build() - ) - .build() - ) - .build()); -``` - -```python - - exampleNamespace = Namespace(self, "tf-cdk-example", - metadata=NamespaceMetadata(name="tf-cdk-example") - ) - - Deployment(self, "nginx-deployment", - metadata=DeploymentMetadata( - name="nginx", - namespace=exampleNamespace.metadata.name, # Reference the name property - labels={"app": app} - ), - spec=DeploymentSpec( - selector=DeploymentSpecSelector( - match_labels={"app": app} - ), - replicas="1", - template=DeploymentSpecTemplate( - metadata=DeploymentSpecTemplateMetadata( - labels={"app": app} - ), - spec=DeploymentSpecTemplateSpec( - container=[ - DeploymentSpecTemplateSpecContainer( - image="nginx:1.7.9", name="nginx") - ] - ) - ) - ) - ) -``` - -```csharp - Namespace exampleNamespace = new Namespace(this, "tf-cdk-example", new NamespaceConfig - { - Metadata = new NamespaceMetadata - { - Name = "tf-cdk-example" - } - }); - - new Deployment(this, "nginx-deployment", new DeploymentConfig - { - Metadata = new DeploymentMetadata - { - Name = "nginx", - Namespace = exampleNamespace.Metadata.Name, // Reference the name property - Labels = new Dictionary { - { "app", "my-app" } - } - }, - Spec = new DeploymentSpec - { - Template = new DeploymentSpecTemplate - { - Metadata = new DeploymentSpecTemplateMetadata - { - Labels = new Dictionary { - { "app", "my-app" } - } - }, - Spec = new DeploymentSpecTemplateSpec - { - Container = new DeploymentSpecTemplateSpecContainer[] { - new DeploymentSpecTemplateSpecContainer { - Name = "nginx", - Image = "nginx:1.7.9" - } - } - } - } - } - }); -``` - -```go -exampleNamespace := namespace.NewNamespace(stack, jsii.String("tf-cdk-example"), &namespace.NamespaceConfig{ - Metadata: &namespace.NamespaceMetadata{ - Name: jsii.String("tf-cdk-example"), - }, -}) - -deployment.NewDeployment(stack, jsii.String("nginx-deployment"), &deployment.DeploymentConfig{ - Metadata: &deployment.DeploymentMetadata{ - Name: jsii.String("nginx"), - Namespace: exampleNamespace.Metadata().Name(), // Reference the name property - Labels: &map[string]*string{ - "app": jsii.String("my-app"), - }, - }, - Spec: &deployment.DeploymentSpec{ - Template: &deployment.DeploymentSpecTemplate{ - Metadata: &deployment.DeploymentSpecTemplateMetadata{ - Labels: &map[string]*string{ - "app": jsii.String("my-app"), - }, - }, - Spec: 
&deployment.DeploymentSpecTemplateSpec{ - Container: []deployment.DeploymentSpecTemplateSpecContainer{ - { - Name: jsii.String("nginx"), - Image: jsii.String("nginx:1.7.9"), - }, - }, - }, - }, - }, -}) -``` - - - -## Refactoring & Renaming Resources - -When working with your infrastructure definitions and the need arises to refactor or rename resources without destroying and recreating them, you can leverage the `moveTo` function like so: - -```ts -new S3Bucket(this, "test-bucket-move-to", { - bucket: "move-bucket-name", -}).addMoveTarget("move-s3"); - -new S3Bucket(this, "test-bucket-move-from", { - bucket: "move-bucket-name", -}).moveTo("move-s3"); -``` - -Refer to our [Refactoring Guide](/terraform/cdktf/examples-and-guides/refactoring#moving-renaming-resources-within-a-stack) for more information - -## Provisioners - -Provisioners can be used to model specific actions on the local machine or on a remote machine in order to prepare servers or other infrastructure objects for service. You can find more information on the concept of provisioners in the [Terraform docs](/terraform/language/resources/provisioners/syntax). You can pass the `provisioners` key to define a list of provisioners, connections can be configured with the `connection` key. A working example can be found at [examples/typescript/provisioner](https://github.com/hashicorp/terraform-cdk/blob/main/examples/typescript/provisioner/main.ts). - -If you need to use the special [`self` object](/terraform/language/resources/provisioners/syntax#the-self-object) that can only be used in `provisioner` and `connection` blocks to refer to the parent resource you can use the `TerraformSelf` class like this: `TerraformSelf.getString("public_ip")`. - -## Custom Condition Checks - -If you need to ensure a condition is met either before or after a resource was created you can specify [conditions](/terraform/language/expressions/custom-conditions#preconditions-and-postconditions). -To add one configure the `lifecycle` key on your resource with an object containing a `precondition` and / or a `postcondition`. These keys take a list of conditions with a `condition` key containing a Terraform Expression to be evaluated and an `errorMessage` key containing a string to be displayed if the condition is not met. - -## Importing Resources - -If you have existing resources that you want to manage with CDKTF, you can import them into your CDKTF application. The best way to do this is using the [`import` block feature](/terraform/language/import) of Terraform >= 1.5. You can do this in CDKTF either with a specified configuration or without. - -### How To Import - -To import a resource, first instantiate an instance of the resource type you wish to import – in our case we'll be using an S3Bucket. No configuration is explicitly needed. You then call the `importFrom` method on the resource object. This method takes the ID of the resource to be imported as the first argument and the provider as an optional second. The provider is only required if you have multiple providers of the same type in your configuration. - -```typescript -new S3Bucket(this, "bucket", {}).importFrom(bucketId); -``` - -When running plan / apply you will get the information that your resource is going to be imported. Once you have ran apply, you can remove the `importFrom` call and the resource will become managed by CDKTF. - -Please note that Terraform is going to update existing fields on the imported resource to match your configuration as it puts it under management. 
In our case we did not define any specific properties on the `S3Bucket` which causes Terraform e.g. to remove the tags currently defined on the resource (as can be seen on the plan below). If you want to keep existing settings, you can run a plan first, add everything that Terraform would change to your resource config, and only then apply the changes. - -Your output might look as follows: - -``` -ts-import Initializing the backend... -ts-import Initializing provider plugins... -ts-import - Reusing previous version of hashicorp/aws from the dependency lock file -ts-import - Using previously-installed hashicorp/aws v5.5.0 -ts-import Terraform has been successfully initialized! - - You may now begin working with Terraform. Try running "terraform plan" to see - any changes that are required for your infrastructure. All Terraform commands - should now work. - - If you ever set or change modules or backend configuration for Terraform, - rerun this command to reinitialize your working directory. If you forget, other - commands will detect it and remind you to do so if necessary. -ts-import aws_s3_bucket.bucket (bucket): Preparing import... [id=best-bucket-in-the-world] -ts-import aws_s3_bucket.bucket (bucket): Refreshing state... [id=best-bucket-in-the-world] -ts-import Terraform used the selected providers to generate the following execution - plan. Resource actions are indicated with the following symbols: - ~ update in-place - - Terraform will perform the following actions: -ts-import # aws_s3_bucket.bucket (bucket) will be updated in-place - # (imported from "best-bucket-in-the-world") - ~ resource "aws_s3_bucket" "bucket" { - arn = "arn:aws:s3:::best-bucket-in-the-world" - bucket = "best-bucket-in-the-world" - bucket_domain_name = "best-bucket-in-the-world.s3.amazonaws.com" - bucket_regional_domain_name = "best-bucket-in-the-world.s3.us-east-1.amazonaws.com" - + force_destroy = false - hosted_zone_id = "XXXXXXXXXXXXX" - id = "best-bucket-in-the-world" - object_lock_enabled = false - region = "us-east-1" - request_payer = "BucketOwner" - ~ tags = { - - "foo" = "bar" -> null - } - ~ tags_all = { - - "foo" = "bar" - } -> (known after apply) - - grant { - id = "XXXXXXXXXXXXX" - permissions = [ - "FULL_CONTROL", - ] - type = "CanonicalUser" - } - - server_side_encryption_configuration { - rule { - bucket_key_enabled = true - - apply_server_side_encryption_by_default { - sse_algorithm = "AES256" - } - } - } - - versioning { - enabled = true - mfa_delete = false - } - } - - Plan: 1 to import, 0 to add, 1 to change, 0 to destroy. - - ───────────────────────────────────────────────────────────────────────────── - - Saved the plan to: plan - - To perform exactly these actions, run the following command to apply: - terraform apply "plan" -``` - -### Generate Configuration For Import - -If you don't want to specify the configuration of your imported resource yourself you can use the static method `generateConfigForImport` on the class of the resource you want to import. This method takes the scope as the first argument, the construct id of the resource to import to (as will be given in the generated config returned), the resource id of the resource to be imported, and the provider as an optional fourth. The provider is only required if you have multiple providers of the same type in your configuration. 
- -```typescript -S3Bucket.generateConfigForImport(this, "bucket", bucketId); -``` - -When running `cdktf plan ` Terraform will generate code for the resource you are importing and CDKTF will convert it to the language you are using. - -Your output might look as follows: - -``` -ts-import-with-configuration Initializing the backend... -ts-import-with-configuration Initializing provider plugins... - - Reusing previous version of hashicorp/aws from the dependency lock file -ts-import-with-configuration - Using previously-installed hashicorp/aws v5.18.1 - - Terraform has been successfully initialized! -ts-import-with-configuration - You may now begin working with Terraform. Try running "terraform plan" to see - any changes that are required for your infrastructure. All Terraform commands - should now work. - - If you ever set or change modules or backend configuration for Terraform, - rerun this command to reinitialize your working directory. If you forget, other - commands will detect it and remind you to do so if necessary. -ts-import-with-configuration aws_s3_bucket.bucket: Preparing import... [id=best-bucket-in-the-world] -ts-import-with-configuration aws_s3_bucket.bucket: Refreshing state... [id=best-bucket-in-the-world] -ts-import-with-configuration Terraform will perform the following actions: -ts-import-with-configuration # aws_s3_bucket.bucket will be imported - # (config will be generated) - resource "aws_s3_bucket" "bucket" { - arn = "arn:aws:s3:::best-bucket-in-the-world" - bucket = "best-bucket-in-the-world" - bucket_domain_name = "best-bucket-in-the-world.s3.amazonaws.com" - bucket_regional_domain_name = "best-bucket-in-the-world.s3.us-east-1.amazonaws.com" - hosted_zone_id = "Z3AQBSTGFYJSTF" - id = "best-bucket-in-the-world" - object_lock_enabled = false - region = "us-east-1" - request_payer = "BucketOwner" - tags = {} - tags_all = {} - - grant { - id = "554912fda2704333d162d216be50aefb05562e0bf1709997f1d9417cf46087d5" - permissions = [ - "FULL_CONTROL", - ] - type = "CanonicalUser" - } - - server_side_encryption_configuration { - rule { - bucket_key_enabled = true - - apply_server_side_encryption_by_default { - sse_algorithm = "AES256" - } - } - } - - versioning { - enabled = false - mfa_delete = false - } - } - - Plan: 1 to import, 0 to add, 0 to change, 0 to destroy. - ╷ - │ Warning: Config generation is experimental - │ - │ Generating configuration during import is currently experimental, and the - │ generated configuration format may change in future versions. - ╵ - - ───────────────────────────────────────────────────────────────────────────── - - Terraform has generated configuration and written it to - generated_resources.tf. Please review the configuration and edit it as - necessary before adding it to version control. - - Saved the plan to: plan - - To perform exactly these actions, run the following command to apply: - terraform apply "plan" -ts-import-with-configuration Import without configuration detected. Terraform has created configuration for it: - # __generated__ by Terraform - # Please review these resources and move them into your main configuration files. 
- - # __generated__ by Terraform from "best-bucket-in-the-world" - resource "aws_s3_bucket" "bucket" { - bucket = "best-bucket-in-the-world" - bucket_prefix = null - force_destroy = null - object_lock_enabled = false - tags = {} - tags_all = {} - } - - - CDKTF has translated the code to the following: - - import { Construct } from "constructs"; - /* - * Provider bindings are generated by running `cdktf get`. - * See https://cdk.tf/provider-generation for more details. - */ - import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; - class MyConvertedCode extends Construct { - constructor(scope: Construct, name: string) { - super(scope, name); - new S3Bucket(this, "bucket", { - bucket: "best-bucket-in-the-world", - bucketPrefix: [null], - forceDestroy: [null], - objectLockEnabled: false, - tags: {}, - tagsAll: {}, - }); - } - } - - - Please review the code and make any necessary changes before adding it to your codebase. - Make sure to only copy the code within the construct's constructor. - - NOTE: Your resource has not yet become managed by CDKTF. - To finish the import remove the call "generateConfigForImport", add the above code within the construct's constructor, and then append the call importFrom() to the generated code: - - new SomeResource(...).importFrom("some_id") -``` - -Though at this point, your resource has not been imported. To import, first add the new generated configuration to your project, then remove the initial call of `generateConfigForImport`. Finally, follow the steps outlined in the section "How To Import" above. On apply, your resource will be imported, then becoming managed by CDKTF. - -## Escape Hatch - -Terraform provides [meta-arguments](/terraform/language/resources/syntax#meta-arguments) to change resource behavior. For example, the `for_each` meta-argument creates multiple resource instances according to a map, or set of strings. The escape hatch allows you to use these meta-arguments to your CDKTF application and to override attributes that CDKTF cannot yet fully express. - -The following example defines a provisioner for a resource using the `addOverride` method. 
- - - - - - - - - -```ts -const tableName = "my-second-table"; - -const table = new DynamodbTable(this, "second-table", { - name: tableName, - hashKey: "id", - attribute: [{ name: "id", type: "S" }], -}); - -table.addOverride("provisioner", [ - { - "local-exec": { - command: `aws dynamodb create-backup --table-name ${tableName} --backup-name ${tableName}-backup`, - }, - }, -]); -``` - -```java -import com.hashicorp.cdktf.TerraformVariable; -import com.hashicorp.cdktf.TerraformVariableConfig; -import imports.aws.dynamodb_table.DynamodbTable; -import imports.aws.dynamodb_table.DynamodbTableAttribute; -import imports.aws.dynamodb_table.DynamodbTableConfig; - String tableName = "my-table"; - - DynamodbTable table = new DynamodbTable(this, "Hello", DynamodbTableConfig.builder() - .name(tableName) - .hashKey("id") - .attribute(Arrays.asList( - DynamodbTableAttribute.builder() - .name("id") - .type("S") - .build())) - .build()); - - table.addOverride("provisioner", Arrays.asList( - new HashMap() { - { - put("local-exec", new HashMap() { - { - put("command", "aws dynamodb create-backup --table-name " - + tableName + " --backup-name " - + tableName + "-backup"); - } - }); - } - })); -``` - -```python - - tableName = "my-table" - - table = DynamodbTable(self, "Hello", - name=tableName, - hash_key="id", - attribute=[{"name": "id", "type": "S"}] - ) - - table.add_override("provisioner", [ - { - "local-exec": { - "command": f"aws dynamodb create-backup --table-name {tableName} --backup-name {tableName}-backup" - } - } - ]) -``` - -```csharp - String tableName = "my-second-table"; - DynamodbTable table = new DynamodbTable(this, "second-table", new DynamodbTableConfig - { - Name = tableName, - HashKey = "id", - Attribute = new DynamodbTableAttribute[] { - new DynamodbTableAttribute { - Name = "id", - Type = "S" - } - } - }); - - table.AddOverride("provisioner", new Dictionary[] { - new Dictionary { - { "local-exec", new Dictionary { - { "command", $"aws dynamodb create-backup --table-name {tableName} --backup-name {tableName}-backup" } - } } - } - }); - -``` - -```go -tableName := "my-second-table" -table := dynamodbtable.NewDynamodbTable(stack, jsii.String("second-table"), &dynamodbtable.DynamodbTableConfig{ - Name: &tableName, - HashKey: jsii.String("id"), - Attribute: []map[string]string{ - {"name": "id", "type": "S"}, - }, -}) -table.AddOverride(jsii.String("provisioner"), []map[string]map[string]string{ - {"local-exec": { - "command": fmt.Sprintf( - "aws dynamodb create-backup --table-name %s --backup-name %s-backup", - tableName, - tableName, - ), - }}, -}) -``` - - - -When you run `cdktf synth`, CDKTF generates a Terraform configuration with the [provisioner added to the JSON object](/terraform/language/syntax/json#nested-block-mapping). - -```json -{ - "resource": { - "aws_dynamodb_table": { - "helloterraHello69872235": { - "hash_key": "temp", - "name": "my-table", - "attribute": [ - { - "name": "id", - "type": "S" - } - ], - "provisioner": [ - { - "local-exec": { - "command": "aws dynamodb create-backup --table-name my-table --backup-name my-table-backup" - } - } - ] - } - } - } -} -``` - -To override an attribute, include the resource attribute key in `addOverride`. The attribute in the escape hatch is in snake case because the Terraform JSON configuration uses snake case instead of camel case. 
- - - - - - - - - -```ts -const topic = new SnsTopic(this, "Topic", { - displayName: "will-be-overwritten", -}); -topic.addOverride("display_name", "my-topic"); -``` - -```java -new SnsTopic(this, "Topic", SnsTopicConfig.builder() - .displayName("will-be-overwritten") - .build()).addOverride("display_name", "my-topic"); -``` - -```python - - topic = SnsTopic(self, "Topic", - display_name="will-be-overwritten" - ) - - topic.add_override("display_name", "my-topic") -``` - -```csharp -SnsTopic topic = new SnsTopic(this, "Topic", new SnsTopicConfig -{ - DisplayName = "will-be-overwritten" -}); -topic.AddOverride("display_name", "my-topic"); -``` - -```go -topic := snstopic.NewSnsTopic(stack, jsii.String("Topic"), &snstopic.SnsTopicConfig{ - DisplayName: jsii.String("will-be-overwritten"), -}) -topic.AddOverride(jsii.String("display_name"), jsii.String("my-topic")) -``` - - - -When you run `cdktf synth`, CDKTF generates a Terraform configuration with the value overwritten. - -```json -{ - "resource": { - "aws_sns_topic": { - "helloterraTopic6609C1D4": { - "display_name": "my-topic" - } - } - } -} -``` - -Use a dot notation to access elements in arrays: `resource.addOverride("configurations.0.https", true)`. - -### Escape Hatch for Dynamic Blocks - -Terraform configurations sometimes use [`dynamic` blocks](/terraform/language/expressions/dynamic-blocks) to create related resources based on dynamic data, or data that is only known after Terraform provisions the infrastructure. For example, you could create a series of nested blocks for a series of Virtual Private Cloud (VPC) ingress ports. A `dynamic` block loops over a complex value and generates a nested resource block for each element of that complex value. - -In CDKTF applications, you must use an escape hatch when you want to loop through a dynamic value like a `TerraformVariable` or a resource output. - -To use an escape hatch to loop over dynamic data, you must: - -- Set the first argument of `addOverride` to be `dynamic.`. -- Create a `for_each` value for the second argument and set it to the list you want to iterate over. -- Take the attribute as base for the reference when you reference values from the list. For example, use `"${.value.nested_value}"`. - -The following example adds ingress values by looping through the ports passed as `TerraformVariable`. 
- - - - - - - - - -```ts -const portsList = new TerraformVariable(this, "ports", { - type: "list", - default: [22, 80, 443, 5432], -}); - -const sg = new SecurityGroup(this, "security1", { - name: "security1", - vpcId: "vpcs", - egress: [ - { - fromPort: 0, - toPort: 0, - cidrBlocks: ["0.0.0.0/0"], - protocol: "-1", - }, - ], -}); -sg.addOverride("dynamic.ingress", { - for_each: portsList.listValue, - content: { - from_port: "${ingress.value}", - to_port: "${ingress.value}", - cidr_blocks: ["0.0.0.0/0"], - protocol: "-1", - }, -}); -``` - -```java -import com.hashicorp.cdktf.TerraformVariable; -import com.hashicorp.cdktf.TerraformVariableConfig; -import imports.aws.security_group.*; - TerraformVariable ports = new TerraformVariable(this, "ports", TerraformVariableConfig.builder() - .type("list") - .defaultValue(Arrays.asList(22, 80, 443, 5432)) - .build()); - - SecurityGroup sq = new SecurityGroup(this, "sec1grp", SecurityGroupConfig.builder() - .name("security1") - .vpcId("vpcs") - .egress(Arrays.asList( - SecurityGroupEgress.builder() - .fromPort(0) - .toPort(0) - .cidrBlocks(Arrays.asList("0.0.0.0/0")) - .protocol("-1") - .build())) - .build()); - - sq.addOverride("dynamic.ingress", new HashMap() { - { - put("for_each", ports.getListValue()); - put("content", new HashMap() { - { - put("from_port", "${ingress.value}"); - put("to_port", "${ingress.value}"); - put("cidr_blocks", Arrays.asList("0.0.0.0/0")); - put("protocol", "-1"); - } - }); - } - }); -``` - -```python - - ports = TerraformVariable(self, "ports", - type="list", - default=[22, 80, 443, 5432] - ) - - sq = SecurityGroup(self, "sec1grp", - name="security1", - vpc_id="vpcs", - egress=[ - { - "from_port": 0, - "to_port": 0, - "cidr_blocks": ["0.0.0.0/0"], - "protocol": "-1" - } - ] - ) - - sq.add_override("dynamic.ingress", { - "for_each": ports.list_value, - "content": { - "from_port": "${ingress.value}", - "to_port": "${ingress.value}", - "cidr_blocks": ["0.0.0.0/0"], - "protocol": "-1" - } - }) -``` - -```csharp -TerraformVariable portsList = new TerraformVariable(this, "ports", new TerraformVariableConfig -{ - Type = "list", -}); -SecurityGroup sg = new SecurityGroup(this, "security1", new SecurityGroupConfig -{ - Name = "security1", - VpcId = "vpcs", - Egress = new SecurityGroupEgress[] { - new SecurityGroupEgress { - FromPort = 0, - ToPort = 0, - CidrBlocks = new string[] { "0.0.0.0/0" }, - Protocol = "-1" - } - } -}); -sg.AddOverride("dynamic.ingress", new Dictionary { - { "for_each", portsList.ListValue }, - { "content", new Dictionary { - { "from_port", "${ingress.value}" }, - { "to_port", "${ingress.value}" }, - { "cidr_blocks", new string[] { "0.0.0.0/0" } }, - { "protocol", "-1" } - }} -}); -``` - -```go -portsList := cdktf.NewTerraformVariable(stack, jsii.String("ports"), &cdktf.TerraformVariableConfig{ - Type: jsii.String("list"), - Default: []int{22, 80, 443, 5432}, -}) -sg := securitygroup.NewSecurityGroup(stack, jsii.String("security1"), &securitygroup.SecurityGroupConfig{ - Name: jsii.String("security1"), - VpcId: jsii.String("vpcs"), - Egress: &[]securitygroup.SecurityGroupEgress{ - { - FromPort: jsii.Number(0), - ToPort: jsii.Number(0), - CidrBlocks: &[]*string{jsii.String("0.0.0.0/0")}, - Protocol: jsii.String("-1"), - }, - }, -}) -sg.AddOverride(jsii.String("dynamic.ingress"), &map[string]interface{}{ - "for_each": portsList.ListValue(), - "content": &map[string]interface{}{ - "from_port": "${ingress.value}", - "to_port": "${ingress.value}", - "cidr_blocks": []string{"0.0.0.0/0"}, - "protocol": "-1", - }, 
-}) -``` - - - -You should only use escape hatches when you need to work with dynamic values that are unknown until after Terraform provisions your infrastructure. If you are working with static values, we recommend using the functionality available in your preferred programming language to iterate through the array. - -The following example loops through the ports without using an escape hatch. - - - - - - - - - -```ts -const ports = [22, 80, 443, 5432]; - -new SecurityGroup(this, "security2", { - name: "security2", - vpcId: "vpcs", - egress: [ - { - fromPort: 0, - toPort: 0, - cidrBlocks: ["0.0.0.0/0"], - protocol: "-1", - }, - ], - ingress: ports.map((port) => ({ - fromPort: port, - toPort: port, - cidrBlocks: ["0.0.0.0/0"], - protocol: "-1", - })), -}); -``` - -```java - List myPorts = Arrays.asList(22, 80, 443, 5432); - List ingress = new ArrayList(); - myPorts.forEach(port -> ingress.add( - SecurityGroupIngress.builder() - .toPort(port) - .fromPort(port) - .cidrBlocks(Arrays.asList("0.0.0.0/0")) - .protocol("-1") - .build())); - - new SecurityGroup(this, "sec2grp", SecurityGroupConfig.builder() - .name("security1") - .vpcId("vpcs") - .egress(Arrays.asList( - SecurityGroupEgress.builder() - .fromPort(0) - .toPort(0) - .cidrBlocks(Arrays.asList("0.0.0.0/0")) - .protocol("-1") - .build())) - .ingress(ingress) - .build()); -``` - -```python - ports = [22, 80, 443, 5432] - - SecurityGroup(self, "sec1grp", - name="security1", - vpc_id="vpcs", - egress=[ - { - "fromPort": 0, - "toPort": 0, - "ciderBlocks": ["0.0.0.0/0"], - "protocol": "-1" - } - ], - ingress=[ - SecurityGroupIngress( - from_port=port, - to_port=port, - protocol="-1", - cidr_blocks=["0.0.0.0/0"] - ) for port in ports - ] - ) -``` - -```csharp -int[] ports = new int[] { 22, 80, 443, 5432 }; -new SecurityGroup(this, "security2", new SecurityGroupConfig -{ - Name = "security2", - VpcId = "vpcs", - Egress = new SecurityGroupEgress[] { - new SecurityGroupEgress { - FromPort = 0, - ToPort = 0, - CidrBlocks = new string[] { "0.0.0.0/0" }, - Protocol = "-1" - } - }, - Ingress = ports.Select(port => new SecurityGroupIngress - { - FromPort = port, - ToPort = port, - CidrBlocks = new string[] { "0.0.0.0/0" }, - Protocol = "-1" - }).ToArray() -}); -``` - -```go -ports := []float64{22, 80, 443, 5432} -ingress := make([]securitygroup.SecurityGroupIngress, 0) -for _, port := range ports { - ingress = append(ingress, securitygroup.SecurityGroupIngress{ - FromPort: jsii.Number(port), - ToPort: jsii.Number(port), - CidrBlocks: &[]*string{jsii.String("0.0.0.0/0")}, - Protocol: jsii.String("-1"), - }) -} - -securitygroup.NewSecurityGroup(stack, jsii.String("security2"), &securitygroup.SecurityGroupConfig{ - Name: jsii.String("security2"), - VpcId: jsii.String("vpcs"), - Egress: &[]securitygroup.SecurityGroupEgress{ - { - FromPort: jsii.Number(0), - ToPort: jsii.Number(0), - CidrBlocks: &[]*string{jsii.String("0.0.0.0/0")}, - Protocol: jsii.String("-1"), - }, - }, - Ingress: &ingress, -}) -``` - - - -## Special Cases - -### Built-in `terraform_data` resource - -The [`terraform_data`](/terraform/language/resources/terraform-data) resource implements the standard resource lifecycle but does not directly perform any other actions. In CDKTF, the resource is exposed as the `TerraformData` class and you can import it directly from the `cdktf` package. - -### Large Resource Configurations - -A few individual Terraform Resources have very deeply nested schemas with a lot of attributes. 
This blows up the config classes and slows down the code generation for languages besides Typescript. To work around this we sometimes limit the depth of the config classes and use `any` on deeper level, some attributes we directly expose as `any` on the top level config class.
-
-- `aws` Provider:
-  - `aws_quicksight_template.definition`, `aws_quicksight_dashboard.definition`, and `aws_quicksight_analysis.definition` are set to `any`
-  - `wafv2` related resources have a lot of deeply nested attributes that might be skipped
diff --git a/website/docs/cdktf/resources/basics.mdx b/website/docs/cdktf/resources/basics.mdx
new file mode 100644
index 0000000000..ddfcae61d3
--- /dev/null
+++ b/website/docs/cdktf/resources/basics.mdx
@@ -0,0 +1,397 @@
+---
+page_title: Resources - CDK for Terraform
+description: >-
+  Resources describe one or more infrastructure objects, like virtual networks, compute instances, and DNS records. Define resources in a CDK for Terraform application.
+---
+
+# Resources
+
+Resources are the most important element when defining infrastructure in CDKTF applications. Each resource describes one or more infrastructure objects, such as virtual networks, compute instances, or higher-level components such as DNS records.
+
+In your CDK for Terraform (CDKTF) application, you will use your preferred programming language to define the resources you want Terraform to manage on one or more [providers](/terraform/cdktf/concepts/providers). This page explains how to use resources in your application. To change resource behavior when necessary, refer to [escape hatches](/terraform/cdktf/resources/escape-hatch).
+
+## Define Resources
+
+Resource definitions and properties vary depending on the type of resource and the provider. Consult your provider's documentation for a full list of available resources and their configuration options.
+
+The following example defines a [DynamoDB table](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dynamodb_table) resource on the AWS provider.
+ + + + + + + + + +```ts +export class ResourcesStack extends TerraformStack { + constructor(scope: Construct, id: string) { + super(scope, id); + + new AwsProvider(this, "aws", { + region: "eu-central-1", + }); + + const region = new DataAwsRegion(this, "region"); + + new DynamodbTable(this, "first-table", { + name: `my-first-table-${region.name}`, + hashKey: "temp", + attribute: [{ name: "id", type: "S" }], + billingMode: "PAY_PER_REQUEST", + }); + } +} +``` + +```java +import software.constructs.Construct; +import com.hashicorp.cdktf.TerraformStack; +import imports.aws.dynamodb_table.DynamodbTable; +import imports.aws.dynamodb_table.DynamodbTableAttribute; +import imports.aws.dynamodb_table.DynamodbTableConfig; + + +import java.util.Arrays; + +public class MainResourcesDefine extends TerraformStack { + + public MainResourcesDefine(Construct scope, String id){ + super(scope, id); + + new AwsProvider(this, "aws", AwsProviderConfig.builder() + .region("us-east-1") + .build() + ); + + DataAwsRegion region = new DataAwsRegion(this, "region"); + + new DynamodbTable(this, "hello", DynamodbTableConfig.builder() + .name("my-first-table-"+region.getName()) + .hashKey("temp") + .attribute(Arrays.asList( + DynamodbTableAttribute.builder() + .name("id") + .type("S") + .build() + ) + ) + .billingMode("PAY_PER_REQUEST") + .build() + ); + } +} +``` + +```python +from constructs import Construct +from cdktf import TerraformStack + +class ResourceStack(TerraformStack): + def __init__(self, scope: Construct, id: str): + super().__init__(scope, id) + + AwsProvider(self, "aws", + region="us-east-1" + ) + + region = DataAwsRegion(self, "region") + + DynamodbTable(self, "Hello", + name="my-first-table-{}".format(region.name), + hash_key="temp", + attribute=[{"name": "id", "type": "S"}], + billing_mode="PAY_PER_REQUEST" + ) +``` + +```csharp + DataAwsRegion region = new DataAwsRegion(this, "region"); + + new DynamodbTable(this, "first-table", new DynamodbTableConfig + { + Name = $"my-first-table-{region.Name}", + HashKey = "temp", + Attribute = new DynamodbTableAttribute[] { + new DynamodbTableAttribute { + Name = "id", + Type = "S" + } + }, + BillingMode = "PAY_PER_REQUEST" + }); +``` + +```go +import ( + "fmt" + + "github.com/aws/constructs-go/constructs/v10" + "github.com/aws/jsii-runtime-go" + "github.com/hashicorp/terraform-cdk-go/cdktf" + "github.com/hashicorp/terraform-cdk/examples/go/documentation/generated/hashicorp/aws/dataawsregion" + "github.com/hashicorp/terraform-cdk/examples/go/documentation/generated/hashicorp/aws/dynamodbtable" + aws "github.com/hashicorp/terraform-cdk/examples/go/documentation/generated/hashicorp/aws/provider" + +) + +func NewResourcesStack(scope constructs.Construct, name string) cdktf.TerraformStack { + stack := cdktf.NewTerraformStack(scope, &name) + + aws.NewAwsProvider(stack, jsii.String("aws"), &aws.AwsProviderConfig{ + Region: jsii.String("eu-central-1"), + }) + + region := dataawsregion.NewDataAwsRegion(stack, jsii.String("region"), &dataawsregion.DataAwsRegionConfig{}) + + dynamodbtable.NewDynamodbTable(stack, jsii.String("first-table"), &dynamodbtable.DynamodbTableConfig{ + Name: jsii.String(fmt.Sprintf("my-first-table-%s", *region.Name())), + HashKey: jsii.String("id"), + Attribute: []map[string]string{ + {"name": "id", "type": "S"}, + }, + BillingMode: jsii.String("PAY_PER_REQUEST"), + }) + + + return stack +} + +``` + + + +The [examples page](/terraform/cdktf/examples-and-guides/examples) contains multiple example projects for every supported programming 
language.
+
+## Scope
+
+You can instantiate the same resource multiple times throughout your infrastructure. For example, you may want to create multiple S3 Buckets with different configurations. Instances that share the same parent element are considered to be part of the same scope. You must set a different `name` property for each instance to avoid naming conflicts.
+
+Refer to the [constructs documentation](/terraform/cdktf/concepts/constructs#scope) for more details and an example.
+
+## References
+
+You can reference resource properties throughout your configuration. For example, you may want to use the name of a parent resource when assigning names to related child resources. Refer to your provider's documentation for a full list of available properties for each resource type.
+
+To create references, access the property directly on the resource instance. For example, you could use `myResource.name` to retrieve the `name` property from `myResource`. Terraform does not support passing an entire block (e.g. `exampleNamespace.metadata`) into a resource or data source, so you must create a reference for each individual property.
+
+References are also useful when you need to track logical dependencies. For example, Kubernetes resources live in a namespace, so a namespace must exist before Terraform can provision the associated resources. The following example uses a reference for the namespace property in the deployment. This reference tells Terraform that it needs to create the namespace before creating the resources.
+
+
+
+
+
+
+
+
+
+```ts
+const exampleNamespace = new Namespace(this, "tf-cdk-example", {
+  metadata: {
+    name: "tf-cdk-example",
+  },
+});
+
+new Deployment(this, "nginx-deployment", {
+  metadata: {
+    name: "nginx",
+    namespace: exampleNamespace.metadata.name, // Reference the namespace name property
+    labels: {
+      app: "my-app",
+    },
+  },
+  spec: {
+    // ...
+ }, +}); +``` + +```java +import imports.kubernetes.namespace.Namespace; +import imports.kubernetes.namespace.NamespaceConfig; +import imports.kubernetes.namespace.NamespaceMetadata; + Namespace exampleNamespace = new Namespace(this, "tf-cdk-example", NamespaceConfig.builder() + .metadata(NamespaceMetadata.builder() + .name("tf-cdk-example") + .build()) + .build()); + + new Deployment(this, "nginx-deployment", DeploymentConfig.builder() + .metadata(DeploymentMetadata.builder() + .name("nginx") + .namespace(exampleNamespace.getMetadata().getName()) // Reference the name property + .labels(new HashMap() { + { + put("app", app.toString()); + } + }) + .build()) + .spec(DeploymentSpec.builder() + .selector(DeploymentSpecSelector.builder() + .matchLabels(new HashMap(){{ + put("app", app.toString()); + }}) + .build() + ) + .replicas("1") + .template(DeploymentSpecTemplate.builder() + .metadata(DeploymentSpecTemplateMetadata.builder() + .labels(new HashMap(){{ + put("app", app.toString()); + }}) + .build() + ) + .spec(DeploymentSpecTemplateSpec.builder() + .container(Arrays.asList(DeploymentSpecTemplateSpecContainer.builder() + .image("nginx:1.7.9") + .name("nginx") + .build() + ) + ) + .build() + ) + .build() + ) + .build() + ) + .build()); +``` + +```python + + exampleNamespace = Namespace(self, "tf-cdk-example", + metadata=NamespaceMetadata(name="tf-cdk-example") + ) + + Deployment(self, "nginx-deployment", + metadata=DeploymentMetadata( + name="nginx", + namespace=exampleNamespace.metadata.name, # Reference the name property + labels={"app": app} + ), + spec=DeploymentSpec( + selector=DeploymentSpecSelector( + match_labels={"app": app} + ), + replicas="1", + template=DeploymentSpecTemplate( + metadata=DeploymentSpecTemplateMetadata( + labels={"app": app} + ), + spec=DeploymentSpecTemplateSpec( + container=[ + DeploymentSpecTemplateSpecContainer( + image="nginx:1.7.9", name="nginx") + ] + ) + ) + ) + ) +``` + +```csharp + Namespace exampleNamespace = new Namespace(this, "tf-cdk-example", new NamespaceConfig + { + Metadata = new NamespaceMetadata + { + Name = "tf-cdk-example" + } + }); + + new Deployment(this, "nginx-deployment", new DeploymentConfig + { + Metadata = new DeploymentMetadata + { + Name = "nginx", + Namespace = exampleNamespace.Metadata.Name, // Reference the name property + Labels = new Dictionary { + { "app", "my-app" } + } + }, + Spec = new DeploymentSpec + { + Template = new DeploymentSpecTemplate + { + Metadata = new DeploymentSpecTemplateMetadata + { + Labels = new Dictionary { + { "app", "my-app" } + } + }, + Spec = new DeploymentSpecTemplateSpec + { + Container = new DeploymentSpecTemplateSpecContainer[] { + new DeploymentSpecTemplateSpecContainer { + Name = "nginx", + Image = "nginx:1.7.9" + } + } + } + } + } + }); +``` + +```go +exampleNamespace := namespace.NewNamespace(stack, jsii.String("tf-cdk-example"), &namespace.NamespaceConfig{ + Metadata: &namespace.NamespaceMetadata{ + Name: jsii.String("tf-cdk-example"), + }, +}) + +deployment.NewDeployment(stack, jsii.String("nginx-deployment"), &deployment.DeploymentConfig{ + Metadata: &deployment.DeploymentMetadata{ + Name: jsii.String("nginx"), + Namespace: exampleNamespace.Metadata().Name(), // Reference the name property + Labels: &map[string]*string{ + "app": jsii.String("my-app"), + }, + }, + Spec: &deployment.DeploymentSpec{ + Template: &deployment.DeploymentSpecTemplate{ + Metadata: &deployment.DeploymentSpecTemplateMetadata{ + Labels: &map[string]*string{ + "app": jsii.String("my-app"), + }, + }, + Spec: 
&deployment.DeploymentSpecTemplateSpec{
+				Container: []deployment.DeploymentSpecTemplateSpecContainer{
+					{
+						Name:  jsii.String("nginx"),
+						Image: jsii.String("nginx:1.7.9"),
+					},
+				},
+			},
+		},
+	},
+})
+```
+
+
+
+## Provisioners
+
+Provisioners can be used to model specific actions on the local machine or on a remote machine in order to prepare servers or other infrastructure objects for service. You can find more information on the concept of provisioners in the [Terraform docs](/terraform/language/resources/provisioners/syntax). You can pass the `provisioners` key to define a list of provisioners; connections can be configured with the `connection` key. A working example can be found at [examples/typescript/provisioner](https://github.com/hashicorp/terraform-cdk/blob/main/examples/typescript/provisioner/main.ts).
+
+If you need to use the special [`self` object](/terraform/language/resources/provisioners/syntax#the-self-object), which can only be used in `provisioner` and `connection` blocks to refer to the parent resource, use the `TerraformSelf` class like this: `TerraformSelf.getString("public_ip")`.
+
+## Custom Condition Checks
+
+If you need to ensure a condition is met either before or after a resource is created, you can specify [conditions](/terraform/language/expressions/custom-conditions#preconditions-and-postconditions).
+To add one, configure the `lifecycle` key on your resource with an object containing a `precondition` and / or a `postcondition`. These keys take a list of conditions with a `condition` key containing a Terraform expression to be evaluated and an `errorMessage` key containing a string to be displayed if the condition is not met.
+
+
+## Special Cases
+
+### Built-in `terraform_data` resource
+
+The [`terraform_data`](/terraform/language/resources/terraform-data) resource implements the standard resource lifecycle but does not directly perform any other actions. In CDKTF, the resource is exposed as the `TerraformData` class and you can import it directly from the `cdktf` package.
+
+### Large Resource Configurations
+
+A few individual Terraform resources have very deeply nested schemas with a lot of attributes. This inflates the config classes and slows down code generation for languages besides TypeScript. To work around this, we sometimes limit the depth of the config classes and use `any` on deeper levels; some attributes are exposed directly as `any` on the top-level config class.
+
+- `aws` Provider:
+  - `aws_quicksight_template.definition`, `aws_quicksight_dashboard.definition`, and `aws_quicksight_analysis.definition` are set to `any`
+  - `wafv2` related resources have a lot of deeply nested attributes that might be skipped
diff --git a/website/docs/cdktf/resources/escape-hatch.mdx b/website/docs/cdktf/resources/escape-hatch.mdx
new file mode 100644
index 0000000000..d7d50f2cb6
--- /dev/null
+++ b/website/docs/cdktf/resources/escape-hatch.mdx
@@ -0,0 +1,545 @@
+---
+page_title: Escape Hatches - CDK for Terraform
+description: >-
+  Escape hatches let you add Terraform meta-arguments and override attributes that CDK for Terraform cannot yet fully express.
+---
+
+
+
+## Escape Hatch
+
+Terraform provides [meta-arguments](/terraform/language/resources/syntax#meta-arguments) to change resource behavior. For example, the `for_each` meta-argument creates multiple resource instances according to a map or set of strings.
The escape hatch allows you to use these meta-arguments to your CDKTF application and to override attributes that CDKTF cannot yet fully express. + +The following example defines a provisioner for a resource using the `addOverride` method. + + + + + + + + + +```ts +const tableName = "my-second-table"; + +const table = new DynamodbTable(this, "second-table", { + name: tableName, + hashKey: "id", + attribute: [{ name: "id", type: "S" }], +}); + +table.addOverride("provisioner", [ + { + "local-exec": { + command: `aws dynamodb create-backup --table-name ${tableName} --backup-name ${tableName}-backup`, + }, + }, +]); +``` + +```java +import com.hashicorp.cdktf.TerraformVariable; +import com.hashicorp.cdktf.TerraformVariableConfig; +import imports.aws.dynamodb_table.DynamodbTable; +import imports.aws.dynamodb_table.DynamodbTableAttribute; +import imports.aws.dynamodb_table.DynamodbTableConfig; + String tableName = "my-table"; + + DynamodbTable table = new DynamodbTable(this, "Hello", DynamodbTableConfig.builder() + .name(tableName) + .hashKey("id") + .attribute(Arrays.asList( + DynamodbTableAttribute.builder() + .name("id") + .type("S") + .build())) + .build()); + + table.addOverride("provisioner", Arrays.asList( + new HashMap() { + { + put("local-exec", new HashMap() { + { + put("command", "aws dynamodb create-backup --table-name " + + tableName + " --backup-name " + + tableName + "-backup"); + } + }); + } + })); +``` + +```python + + tableName = "my-table" + + table = DynamodbTable(self, "Hello", + name=tableName, + hash_key="id", + attribute=[{"name": "id", "type": "S"}] + ) + + table.add_override("provisioner", [ + { + "local-exec": { + "command": f"aws dynamodb create-backup --table-name {tableName} --backup-name {tableName}-backup" + } + } + ]) +``` + +```csharp + String tableName = "my-second-table"; + DynamodbTable table = new DynamodbTable(this, "second-table", new DynamodbTableConfig + { + Name = tableName, + HashKey = "id", + Attribute = new DynamodbTableAttribute[] { + new DynamodbTableAttribute { + Name = "id", + Type = "S" + } + } + }); + + table.AddOverride("provisioner", new Dictionary[] { + new Dictionary { + { "local-exec", new Dictionary { + { "command", $"aws dynamodb create-backup --table-name {tableName} --backup-name {tableName}-backup" } + } } + } + }); + +``` + +```go +tableName := "my-second-table" +table := dynamodbtable.NewDynamodbTable(stack, jsii.String("second-table"), &dynamodbtable.DynamodbTableConfig{ + Name: &tableName, + HashKey: jsii.String("id"), + Attribute: []map[string]string{ + {"name": "id", "type": "S"}, + }, +}) +table.AddOverride(jsii.String("provisioner"), []map[string]map[string]string{ + {"local-exec": { + "command": fmt.Sprintf( + "aws dynamodb create-backup --table-name %s --backup-name %s-backup", + tableName, + tableName, + ), + }}, +}) +``` + + + +When you run `cdktf synth`, CDKTF generates a Terraform configuration with the [provisioner added to the JSON object](/terraform/language/syntax/json#nested-block-mapping). + +```json +{ + "resource": { + "aws_dynamodb_table": { + "helloterraHello69872235": { + "hash_key": "temp", + "name": "my-table", + "attribute": [ + { + "name": "id", + "type": "S" + } + ], + "provisioner": [ + { + "local-exec": { + "command": "aws dynamodb create-backup --table-name my-table --backup-name my-table-backup" + } + } + ] + } + } + } +} +``` + +To override an attribute, include the resource attribute key in `addOverride`. 
The attribute in the escape hatch is in snake case because the Terraform JSON configuration uses snake case instead of camel case. + + + + + + + + + +```ts +const topic = new SnsTopic(this, "Topic", { + displayName: "will-be-overwritten", +}); +topic.addOverride("display_name", "my-topic"); +``` + +```java +new SnsTopic(this, "Topic", SnsTopicConfig.builder() + .displayName("will-be-overwritten") + .build()).addOverride("display_name", "my-topic"); +``` + +```python + + topic = SnsTopic(self, "Topic", + display_name="will-be-overwritten" + ) + + topic.add_override("display_name", "my-topic") +``` + +```csharp +SnsTopic topic = new SnsTopic(this, "Topic", new SnsTopicConfig +{ + DisplayName = "will-be-overwritten" +}); +topic.AddOverride("display_name", "my-topic"); +``` + +```go +topic := snstopic.NewSnsTopic(stack, jsii.String("Topic"), &snstopic.SnsTopicConfig{ + DisplayName: jsii.String("will-be-overwritten"), +}) +topic.AddOverride(jsii.String("display_name"), jsii.String("my-topic")) +``` + + + +When you run `cdktf synth`, CDKTF generates a Terraform configuration with the value overwritten. + +```json +{ + "resource": { + "aws_sns_topic": { + "helloterraTopic6609C1D4": { + "display_name": "my-topic" + } + } + } +} +``` + +Use a dot notation to access elements in arrays: `resource.addOverride("configurations.0.https", true)`. + +### Escape Hatch for Dynamic Blocks + +Terraform configurations sometimes use [`dynamic` blocks](/terraform/language/expressions/dynamic-blocks) to create related resources based on dynamic data, or data that is only known after Terraform provisions the infrastructure. For example, you could create a series of nested blocks for a series of Virtual Private Cloud (VPC) ingress ports. A `dynamic` block loops over a complex value and generates a nested resource block for each element of that complex value. + +In CDKTF applications, you must use an escape hatch when you want to loop through a dynamic value like a `TerraformVariable` or a resource output. + +To use an escape hatch to loop over dynamic data, you must: + +- Set the first argument of `addOverride` to be `dynamic.`. +- Create a `for_each` value for the second argument and set it to the list you want to iterate over. +- Take the attribute as base for the reference when you reference values from the list. For example, use `"${.value.nested_value}"`. + +The following example adds ingress values by looping through the ports passed as `TerraformVariable`. 
+ + + + + + + + + +```ts +const portsList = new TerraformVariable(this, "ports", { + type: "list", + default: [22, 80, 443, 5432], +}); + +const sg = new SecurityGroup(this, "security1", { + name: "security1", + vpcId: "vpcs", + egress: [ + { + fromPort: 0, + toPort: 0, + cidrBlocks: ["0.0.0.0/0"], + protocol: "-1", + }, + ], +}); +sg.addOverride("dynamic.ingress", { + for_each: portsList.listValue, + content: { + from_port: "${ingress.value}", + to_port: "${ingress.value}", + cidr_blocks: ["0.0.0.0/0"], + protocol: "-1", + }, +}); +``` + +```java +import com.hashicorp.cdktf.TerraformVariable; +import com.hashicorp.cdktf.TerraformVariableConfig; +import imports.aws.security_group.*; + TerraformVariable ports = new TerraformVariable(this, "ports", TerraformVariableConfig.builder() + .type("list") + .defaultValue(Arrays.asList(22, 80, 443, 5432)) + .build()); + + SecurityGroup sq = new SecurityGroup(this, "sec1grp", SecurityGroupConfig.builder() + .name("security1") + .vpcId("vpcs") + .egress(Arrays.asList( + SecurityGroupEgress.builder() + .fromPort(0) + .toPort(0) + .cidrBlocks(Arrays.asList("0.0.0.0/0")) + .protocol("-1") + .build())) + .build()); + + sq.addOverride("dynamic.ingress", new HashMap() { + { + put("for_each", ports.getListValue()); + put("content", new HashMap() { + { + put("from_port", "${ingress.value}"); + put("to_port", "${ingress.value}"); + put("cidr_blocks", Arrays.asList("0.0.0.0/0")); + put("protocol", "-1"); + } + }); + } + }); +``` + +```python + + ports = TerraformVariable(self, "ports", + type="list", + default=[22, 80, 443, 5432] + ) + + sq = SecurityGroup(self, "sec1grp", + name="security1", + vpc_id="vpcs", + egress=[ + { + "from_port": 0, + "to_port": 0, + "cidr_blocks": ["0.0.0.0/0"], + "protocol": "-1" + } + ] + ) + + sq.add_override("dynamic.ingress", { + "for_each": ports.list_value, + "content": { + "from_port": "${ingress.value}", + "to_port": "${ingress.value}", + "cidr_blocks": ["0.0.0.0/0"], + "protocol": "-1" + } + }) +``` + +```csharp +TerraformVariable portsList = new TerraformVariable(this, "ports", new TerraformVariableConfig +{ + Type = "list", +}); +SecurityGroup sg = new SecurityGroup(this, "security1", new SecurityGroupConfig +{ + Name = "security1", + VpcId = "vpcs", + Egress = new SecurityGroupEgress[] { + new SecurityGroupEgress { + FromPort = 0, + ToPort = 0, + CidrBlocks = new string[] { "0.0.0.0/0" }, + Protocol = "-1" + } + } +}); +sg.AddOverride("dynamic.ingress", new Dictionary { + { "for_each", portsList.ListValue }, + { "content", new Dictionary { + { "from_port", "${ingress.value}" }, + { "to_port", "${ingress.value}" }, + { "cidr_blocks", new string[] { "0.0.0.0/0" } }, + { "protocol", "-1" } + }} +}); +``` + +```go +portsList := cdktf.NewTerraformVariable(stack, jsii.String("ports"), &cdktf.TerraformVariableConfig{ + Type: jsii.String("list"), + Default: []int{22, 80, 443, 5432}, +}) +sg := securitygroup.NewSecurityGroup(stack, jsii.String("security1"), &securitygroup.SecurityGroupConfig{ + Name: jsii.String("security1"), + VpcId: jsii.String("vpcs"), + Egress: &[]securitygroup.SecurityGroupEgress{ + { + FromPort: jsii.Number(0), + ToPort: jsii.Number(0), + CidrBlocks: &[]*string{jsii.String("0.0.0.0/0")}, + Protocol: jsii.String("-1"), + }, + }, +}) +sg.AddOverride(jsii.String("dynamic.ingress"), &map[string]interface{}{ + "for_each": portsList.ListValue(), + "content": &map[string]interface{}{ + "from_port": "${ingress.value}", + "to_port": "${ingress.value}", + "cidr_blocks": []string{"0.0.0.0/0"}, + "protocol": "-1", + }, 
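+	// The keys inside "content" are Terraform attribute names in snake_case,
+	// and "${ingress.value}" refers to the current element of the for_each
+	// iterator, which is named after the attribute passed to AddOverride.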
+}) +``` + + + +You should only use escape hatches when you need to work with dynamic values that are unknown until after Terraform provisions your infrastructure. If you are working with static values, we recommend using the functionality available in your preferred programming language to iterate through the array. + +The following example loops through the ports without using an escape hatch. + + + + + + + + + +```ts +const ports = [22, 80, 443, 5432]; + +new SecurityGroup(this, "security2", { + name: "security2", + vpcId: "vpcs", + egress: [ + { + fromPort: 0, + toPort: 0, + cidrBlocks: ["0.0.0.0/0"], + protocol: "-1", + }, + ], + ingress: ports.map((port) => ({ + fromPort: port, + toPort: port, + cidrBlocks: ["0.0.0.0/0"], + protocol: "-1", + })), +}); +``` + +```java + List myPorts = Arrays.asList(22, 80, 443, 5432); + List ingress = new ArrayList(); + myPorts.forEach(port -> ingress.add( + SecurityGroupIngress.builder() + .toPort(port) + .fromPort(port) + .cidrBlocks(Arrays.asList("0.0.0.0/0")) + .protocol("-1") + .build())); + + new SecurityGroup(this, "sec2grp", SecurityGroupConfig.builder() + .name("security1") + .vpcId("vpcs") + .egress(Arrays.asList( + SecurityGroupEgress.builder() + .fromPort(0) + .toPort(0) + .cidrBlocks(Arrays.asList("0.0.0.0/0")) + .protocol("-1") + .build())) + .ingress(ingress) + .build()); +``` + +```python + ports = [22, 80, 443, 5432] + + SecurityGroup(self, "sec1grp", + name="security1", + vpc_id="vpcs", + egress=[ + { + "fromPort": 0, + "toPort": 0, + "ciderBlocks": ["0.0.0.0/0"], + "protocol": "-1" + } + ], + ingress=[ + SecurityGroupIngress( + from_port=port, + to_port=port, + protocol="-1", + cidr_blocks=["0.0.0.0/0"] + ) for port in ports + ] + ) +``` + +```csharp +int[] ports = new int[] { 22, 80, 443, 5432 }; +new SecurityGroup(this, "security2", new SecurityGroupConfig +{ + Name = "security2", + VpcId = "vpcs", + Egress = new SecurityGroupEgress[] { + new SecurityGroupEgress { + FromPort = 0, + ToPort = 0, + CidrBlocks = new string[] { "0.0.0.0/0" }, + Protocol = "-1" + } + }, + Ingress = ports.Select(port => new SecurityGroupIngress + { + FromPort = port, + ToPort = port, + CidrBlocks = new string[] { "0.0.0.0/0" }, + Protocol = "-1" + }).ToArray() +}); +``` + +```go +ports := []float64{22, 80, 443, 5432} +ingress := make([]securitygroup.SecurityGroupIngress, 0) +for _, port := range ports { + ingress = append(ingress, securitygroup.SecurityGroupIngress{ + FromPort: jsii.Number(port), + ToPort: jsii.Number(port), + CidrBlocks: &[]*string{jsii.String("0.0.0.0/0")}, + Protocol: jsii.String("-1"), + }) +} + +securitygroup.NewSecurityGroup(stack, jsii.String("security2"), &securitygroup.SecurityGroupConfig{ + Name: jsii.String("security2"), + VpcId: jsii.String("vpcs"), + Egress: &[]securitygroup.SecurityGroupEgress{ + { + FromPort: jsii.Number(0), + ToPort: jsii.Number(0), + CidrBlocks: &[]*string{jsii.String("0.0.0.0/0")}, + Protocol: jsii.String("-1"), + }, + }, + Ingress: &ingress, +}) +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/resources/importing.mdx b/website/docs/cdktf/resources/importing.mdx new file mode 100644 index 0000000000..a474af652e --- /dev/null +++ b/website/docs/cdktf/resources/importing.mdx @@ -0,0 +1,234 @@ +--- +page_title: Importing Resources - CDK for Terraform +description: >- + TODO +--- + +## Importing Resources + +If you have existing resources that you want to manage with CDKTF, you can import them into your CDKTF application. 
The best way to do this is to use the [`import` block feature](/terraform/language/import) of Terraform >= 1.5. In CDKTF you can either import with a configuration that you specify yourself or let Terraform generate the configuration for you.
+
+### How To Import
+
+To import a resource, first create an instance of the resource type you want to import – in this example, an `S3Bucket`. No explicit configuration is needed. Then call the `importFrom` method on the resource object. This method takes the ID of the resource to import as its first argument and a provider as an optional second argument. The provider is only required if your configuration contains multiple providers of the same type.
+
+```typescript
+new S3Bucket(this, "bucket", {}).importFrom(bucketId);
+```
+
+When you run plan or apply, the output indicates that your resource is going to be imported. Once you have run apply, you can remove the `importFrom` call and the resource becomes managed by CDKTF.
+
+Note that Terraform updates existing fields on the imported resource to match your configuration as it puts the resource under management. Because this example does not define any specific properties on the `S3Bucket`, Terraform would, for example, remove the tags currently defined on the bucket (as shown in the plan below). If you want to keep existing settings, run a plan first, add everything that Terraform would change to your resource configuration, and only then apply the changes.
+
+Your output might look as follows:
+
+```
+ts-import  Initializing the backend...
+ts-import  Initializing provider plugins...
+ts-import  - Reusing previous version of hashicorp/aws from the dependency lock file
+ts-import  - Using previously-installed hashicorp/aws v5.5.0
+ts-import  Terraform has been successfully initialized!
+
+           You may now begin working with Terraform. Try running "terraform plan" to see
+           any changes that are required for your infrastructure. All Terraform commands
+           should now work.
+
+           If you ever set or change modules or backend configuration for Terraform,
+           rerun this command to reinitialize your working directory. If you forget, other
+           commands will detect it and remind you to do so if necessary.
+ts-import  aws_s3_bucket.bucket (bucket): Preparing import... [id=best-bucket-in-the-world]
+ts-import  aws_s3_bucket.bucket (bucket): Refreshing state... [id=best-bucket-in-the-world]
+ts-import  Terraform used the selected providers to generate the following execution
+           plan. 
Resource actions are indicated with the following symbols: + ~ update in-place + + Terraform will perform the following actions: +ts-import # aws_s3_bucket.bucket (bucket) will be updated in-place + # (imported from "best-bucket-in-the-world") + ~ resource "aws_s3_bucket" "bucket" { + arn = "arn:aws:s3:::best-bucket-in-the-world" + bucket = "best-bucket-in-the-world" + bucket_domain_name = "best-bucket-in-the-world.s3.amazonaws.com" + bucket_regional_domain_name = "best-bucket-in-the-world.s3.us-east-1.amazonaws.com" + + force_destroy = false + hosted_zone_id = "XXXXXXXXXXXXX" + id = "best-bucket-in-the-world" + object_lock_enabled = false + region = "us-east-1" + request_payer = "BucketOwner" + ~ tags = { + - "foo" = "bar" -> null + } + ~ tags_all = { + - "foo" = "bar" + } -> (known after apply) + + grant { + id = "XXXXXXXXXXXXX" + permissions = [ + "FULL_CONTROL", + ] + type = "CanonicalUser" + } + + server_side_encryption_configuration { + rule { + bucket_key_enabled = true + + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } + } + + versioning { + enabled = true + mfa_delete = false + } + } + + Plan: 1 to import, 0 to add, 1 to change, 0 to destroy. + + ───────────────────────────────────────────────────────────────────────────── + + Saved the plan to: plan + + To perform exactly these actions, run the following command to apply: + terraform apply "plan" +``` + +### Generate Configuration For Import + +If you don't want to specify the configuration of your imported resource yourself you can use the static method `generateConfigForImport` on the class of the resource you want to import. This method takes the scope as the first argument, the construct id of the resource to import to (as will be given in the generated config returned), the resource id of the resource to be imported, and the provider as an optional fourth. The provider is only required if you have multiple providers of the same type in your configuration. + +```typescript +S3Bucket.generateConfigForImport(this, "bucket", bucketId); +``` + +When running `cdktf plan ` Terraform will generate code for the resource you are importing and CDKTF will convert it to the language you are using. + +Your output might look as follows: + +``` +ts-import-with-configuration Initializing the backend... +ts-import-with-configuration Initializing provider plugins... + - Reusing previous version of hashicorp/aws from the dependency lock file +ts-import-with-configuration - Using previously-installed hashicorp/aws v5.18.1 + + Terraform has been successfully initialized! +ts-import-with-configuration + You may now begin working with Terraform. Try running "terraform plan" to see + any changes that are required for your infrastructure. All Terraform commands + should now work. + + If you ever set or change modules or backend configuration for Terraform, + rerun this command to reinitialize your working directory. If you forget, other + commands will detect it and remind you to do so if necessary. +ts-import-with-configuration aws_s3_bucket.bucket: Preparing import... [id=best-bucket-in-the-world] +ts-import-with-configuration aws_s3_bucket.bucket: Refreshing state... 
[id=best-bucket-in-the-world] +ts-import-with-configuration Terraform will perform the following actions: +ts-import-with-configuration # aws_s3_bucket.bucket will be imported + # (config will be generated) + resource "aws_s3_bucket" "bucket" { + arn = "arn:aws:s3:::best-bucket-in-the-world" + bucket = "best-bucket-in-the-world" + bucket_domain_name = "best-bucket-in-the-world.s3.amazonaws.com" + bucket_regional_domain_name = "best-bucket-in-the-world.s3.us-east-1.amazonaws.com" + hosted_zone_id = "Z3AQBSTGFYJSTF" + id = "best-bucket-in-the-world" + object_lock_enabled = false + region = "us-east-1" + request_payer = "BucketOwner" + tags = {} + tags_all = {} + + grant { + id = "554912fda2704333d162d216be50aefb05562e0bf1709997f1d9417cf46087d5" + permissions = [ + "FULL_CONTROL", + ] + type = "CanonicalUser" + } + + server_side_encryption_configuration { + rule { + bucket_key_enabled = true + + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } + } + + versioning { + enabled = false + mfa_delete = false + } + } + + Plan: 1 to import, 0 to add, 0 to change, 0 to destroy. + ╷ + │ Warning: Config generation is experimental + │ + │ Generating configuration during import is currently experimental, and the + │ generated configuration format may change in future versions. + ╵ + + ───────────────────────────────────────────────────────────────────────────── + + Terraform has generated configuration and written it to + generated_resources.tf. Please review the configuration and edit it as + necessary before adding it to version control. + + Saved the plan to: plan + + To perform exactly these actions, run the following command to apply: + terraform apply "plan" +ts-import-with-configuration Import without configuration detected. Terraform has created configuration for it: + # __generated__ by Terraform + # Please review these resources and move them into your main configuration files. + + # __generated__ by Terraform from "best-bucket-in-the-world" + resource "aws_s3_bucket" "bucket" { + bucket = "best-bucket-in-the-world" + bucket_prefix = null + force_destroy = null + object_lock_enabled = false + tags = {} + tags_all = {} + } + + + CDKTF has translated the code to the following: + + import { Construct } from "constructs"; + /* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ + import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; + class MyConvertedCode extends Construct { + constructor(scope: Construct, name: string) { + super(scope, name); + new S3Bucket(this, "bucket", { + bucket: "best-bucket-in-the-world", + bucketPrefix: [null], + forceDestroy: [null], + objectLockEnabled: false, + tags: {}, + tagsAll: {}, + }); + } + } + + + Please review the code and make any necessary changes before adding it to your codebase. + Make sure to only copy the code within the construct's constructor. + + NOTE: Your resource has not yet become managed by CDKTF. + To finish the import remove the call "generateConfigForImport", add the above code within the construct's constructor, and then append the call importFrom() to the generated code: + + new SomeResource(...).importFrom("some_id") +``` + +Though at this point, your resource has not been imported. To import, first add the new generated configuration to your project, then remove the initial call of `generateConfigForImport`. Finally, follow the steps outlined in the section "How To Import" above. 
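For example, after you copy the generated configuration into your constructor and append the `importFrom` call, the finished code might look similar to the following sketch (the property values come from the generated example above, with the `null` placeholders dropped – adjust them to match your actual bucket):
+
+```typescript
+import { S3Bucket } from "./.gen/providers/aws/s3-bucket";
+
+// Generated configuration, reviewed and copied into the construct's constructor,
+// followed by importFrom() with the bucket name as the import ID.
+new S3Bucket(this, "bucket", {
+  bucket: "best-bucket-in-the-world",
+  objectLockEnabled: false,
+  tags: {},
+  tagsAll: {},
+}).importFrom("best-bucket-in-the-world");
+```
+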
On apply, your resource is imported and then becomes managed by CDKTF.
diff --git a/website/docs/cdktf/resources/refactoring.mdx b/website/docs/cdktf/resources/refactoring.mdx
new file mode 100644
index 0000000000..ea056a4ce2
--- /dev/null
+++ b/website/docs/cdktf/resources/refactoring.mdx
@@ -0,0 +1,21 @@
+---
+page_title: Resource Refactoring - CDK for Terraform
+description: >-
+  Refactor or rename resources in a CDK for Terraform application without destroying and recreating them.
+---
+
+## Refactoring & Renaming Resources
+
+When you refactor or rename resources in your infrastructure definition and want to avoid destroying and recreating them, you can use the `addMoveTarget` and `moveTo` functions. Mark the destination resource with `addMoveTarget` and call `moveTo` with the same target name on the resource you are moving from:
+
+```ts
+new S3Bucket(this, "test-bucket-move-to", {
+  bucket: "move-bucket-name",
+}).addMoveTarget("move-s3");
+
+new S3Bucket(this, "test-bucket-move-from", {
+  bucket: "move-bucket-name",
+}).moveTo("move-s3");
+```
+
+Refer to our [Refactoring Guide](/terraform/cdktf/examples-and-guides/refactoring#moving-renaming-resources-within-a-stack) for more information
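+
+For reference, this pair of calls makes CDKTF instruct Terraform to update the resource's address in state instead of destroying and recreating it – conceptually the same as a hand-written Terraform `moved` block. A rough HCL equivalent is sketched below; the addresses are illustrative, since CDKTF derives the real addresses from the construct ids at synth time:
+
+```hcl
+# Illustrative only: CDKTF produces the equivalent of this during synthesis.
+moved {
+  from = aws_s3_bucket.test-bucket-move-from
+  to   = aws_s3_bucket.test-bucket-move-to
+}
+```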