diff --git a/examples/data-sources/biganimal_analytics_cluster/data-source.tf b/examples/data-sources/biganimal_analytics_cluster/data-source.tf new file mode 100644 index 00000000..1ac6c615 --- /dev/null +++ b/examples/data-sources/biganimal_analytics_cluster/data-source.tf @@ -0,0 +1,78 @@ +variable "cluster_id" { + type = string + description = "The id of the cluster" +} + +variable "project_id" { + type = string + description = "BigAnimal Project ID" +} + +data "biganimal_analytics_cluster" "this" { + cluster_id = var.cluster_id + project_id = var.project_id +} + +output "backup_retention_period" { + value = data.biganimal_analytics_cluster.this.backup_retention_period +} + +output "cluster_name" { + value = data.biganimal_analytics_cluster.this.cluster_name +} + +output "created_at" { + value = data.biganimal_analytics_cluster.this.created_at +} + +output "csp_auth" { + value = coalesce(data.biganimal_analytics_cluster.this.csp_auth, false) +} + +output "instance_type" { + value = data.biganimal_analytics_cluster.this.instance_type +} + +output "metrics_url" { + value = data.biganimal_analytics_cluster.this.metrics_url +} + +output "logs_url" { + value = data.biganimal_analytics_cluster.this.logs_url +} + +output "pg_type" { + value = data.biganimal_analytics_cluster.this.pg_type +} + +output "pg_version" { + value = data.biganimal_analytics_cluster.this.pg_version +} + +output "phase" { + value = data.biganimal_analytics_cluster.this.phase +} + +output "private_networking" { + value = coalesce(data.biganimal_analytics_cluster.this.private_networking, false) +} + +output "cloud_provider" { + value = data.biganimal_analytics_cluster.this.cloud_provider +} + +output "region" { + value = data.biganimal_analytics_cluster.this.region +} + +output "resizing_pvc" { + value = data.biganimal_analytics_cluster.this.resizing_pvc +} + +output "pe_allowed_principal_ids" { + value = data.biganimal_analytics_cluster.this.pe_allowed_principal_ids +} + +output 
"service_account_ids" { + value = data.biganimal_analytics_cluster.this.service_account_ids +} diff --git a/examples/data-sources/biganimal_analytics_cluster/provider.tf b/examples/data-sources/biganimal_analytics_cluster/provider.tf new file mode 100644 index 00000000..94e312eb --- /dev/null +++ b/examples/data-sources/biganimal_analytics_cluster/provider.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + biganimal = { + source = "EnterpriseDB/biganimal" + version = "0.10.0" + } + } +} diff --git a/examples/resources/biganimal_analytics_cluster/import.sh b/examples/resources/biganimal_analytics_cluster/import.sh new file mode 100644 index 00000000..8f01b424 --- /dev/null +++ b/examples/resources/biganimal_analytics_cluster/import.sh @@ -0,0 +1,2 @@ +# terraform import biganimal_analytics_cluster. / +terraform import biganimal_analytics_cluster.analytics_cluster prj_deadbeef01234567/p-abcd123456 diff --git a/examples/resources/biganimal_analytics_cluster/resource.tf b/examples/resources/biganimal_analytics_cluster/resource.tf new file mode 100644 index 00000000..0def46ca --- /dev/null +++ b/examples/resources/biganimal_analytics_cluster/resource.tf @@ -0,0 +1,74 @@ +terraform { + required_providers { + biganimal = { + source = "EnterpriseDB/biganimal" + version = "0.10.0" + } + random = { + source = "hashicorp/random" + version = "3.6.0" + } + } +} + +resource "random_password" "password" { + length = 16 + special = true + override_special = "!#$%&*()-_=+[]{}<>:?" +} + +variable "cluster_name" { + type = string + description = "The name of the cluster." +} + +variable "project_id" { + type = string + description = "BigAnimal Project ID" +} + +resource "biganimal_analytics_cluster" "analytics_cluster" { + cluster_name = var.cluster_name + project_id = var.project_id + pause = false + + allowed_ip_ranges = [ + { + cidr_block = "127.0.0.1/32" + description = "localhost" + }, + { + cidr_block = "192.168.0.1/32" + description = "description!" 
+ }, + ] + + backup_retention_period = "30d" + csp_auth = false + + instance_type = "aws:m6id.12xlarge" + password = resource.random_password.password.result + + maintenance_window = { + is_enabled = false + start_day = 0 + start_time = "00:00" + } + + pg_type = "epas" + pg_version = "16" + private_networking = false + cloud_provider = "bah:aws" + region = "ap-south-1" + # pe_allowed_principal_ids = [ + # # AWS example: "123456789012", Azure example: "9334e5e6-7f47-aE61-5A4F-ee067daeEf4A", GCP example: "development-data-123456" + # ] + # service_account_ids = [ + # # ex: "test@development-data-123456.iam.gserviceaccount.com" + # ] +} + +output "password" { + sensitive = true + value = resource.biganimal_analytics_cluster.analytics_cluster.password +} diff --git a/pkg/api/api.go b/pkg/api/api.go index 9df4b531..ac2f763c 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -6,6 +6,8 @@ import ( "time" ) +const clientTimeoutSeconds = 60 + type API struct { BaseURL string Token string diff --git a/pkg/api/beacon_analytics_client.go b/pkg/api/beacon_analytics_client.go new file mode 100644 index 00000000..48768613 --- /dev/null +++ b/pkg/api/beacon_analytics_client.go @@ -0,0 +1,21 @@ +package api + +import ( + "net/http" + "time" +) + +type BeaconAnalyticsClient struct { + API +} + +func NewBeaconAnalyticsClient(api API) *BeaconAnalyticsClient { + httpClient := http.Client{ + Timeout: clientTimeoutSeconds * time.Second, + } + + api.HTTPClient = httpClient + c := BeaconAnalyticsClient{API: api} + + return &c +} diff --git a/pkg/api/cloud_provider_client.go b/pkg/api/cloud_provider_client.go index 63f3c4cf..a5f7c1b2 100644 --- a/pkg/api/cloud_provider_client.go +++ b/pkg/api/cloud_provider_client.go @@ -15,7 +15,7 @@ type CloudProviderClient struct{ API } func NewCloudProviderClient(api API) *CloudProviderClient { httpClient := http.Client{ - Timeout: 60 * time.Second, + Timeout: clientTimeoutSeconds * time.Second, } api.HTTPClient = httpClient diff --git 
a/pkg/api/cluster_client.go b/pkg/api/cluster_client.go index 7c0c6e81..9696a11f 100644 --- a/pkg/api/cluster_client.go +++ b/pkg/api/cluster_client.go @@ -19,7 +19,7 @@ type ClusterClient struct { func NewClusterClient(api API) *ClusterClient { httpClient := http.Client{ - Timeout: 60 * time.Second, + Timeout: clientTimeoutSeconds * time.Second, } api.HTTPClient = httpClient diff --git a/pkg/api/pgd_client.go b/pkg/api/pgd_client.go index 29f05428..fc874801 100644 --- a/pkg/api/pgd_client.go +++ b/pkg/api/pgd_client.go @@ -22,7 +22,7 @@ var clusterClient = ClusterClient{} func NewPGDClient(api API) *PGDClient { httpClient := http.Client{ - Timeout: 60 * time.Second, + Timeout: clientTimeoutSeconds * time.Second, } api.HTTPClient = httpClient diff --git a/pkg/api/project_client.go b/pkg/api/project_client.go index 9babbba5..eac1a965 100644 --- a/pkg/api/project_client.go +++ b/pkg/api/project_client.go @@ -15,7 +15,7 @@ type ProjectClient struct{ API } func NewProjectClient(api API) *ProjectClient { httpClient := http.Client{ - Timeout: 60 * time.Second, + Timeout: clientTimeoutSeconds * time.Second, } api.HTTPClient = httpClient @@ -81,7 +81,6 @@ func (c ProjectClient) List(ctx context.Context, query string) ([]*models.Projec err = json.Unmarshal(body, &response) return response.Data, err - } func (c ProjectClient) Update(ctx context.Context, projectId, projectName string) (string, error) { diff --git a/pkg/api/region_client.go b/pkg/api/region_client.go index 06642247..6f3a763a 100644 --- a/pkg/api/region_client.go +++ b/pkg/api/region_client.go @@ -21,7 +21,7 @@ type RegionClient struct{ API } func NewRegionClient(api API) *RegionClient { httpClient := http.Client{ - Timeout: 60 * time.Second, + Timeout: clientTimeoutSeconds * time.Second, } api.HTTPClient = httpClient diff --git a/pkg/provider/data_source_analytics_cluster.go b/pkg/provider/data_source_analytics_cluster.go new file mode 100644 index 00000000..6a8d6a12 --- /dev/null +++ 
b/pkg/provider/data_source_analytics_cluster.go @@ -0,0 +1,220 @@ +package provider + +import ( + "context" + + "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/api" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +var ( + _ datasource.DataSource = &analyticsClusterDataSource{} + _ datasource.DataSourceWithConfigure = &analyticsClusterDataSource{} +) + +type analyticsClusterDataSourceModel struct { + analyticsClusterResourceModel +} +type analyticsClusterDataSource struct { + client *api.ClusterClient +} + +func (r *analyticsClusterDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + r.client = req.ProviderData.(*api.API).ClusterClient() +} + +func (r *analyticsClusterDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_analytics_cluster" +} + +func (r *analyticsClusterDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + MarkdownDescription: "The analytics cluster resource is used to manage BigAnimal analytics clusters.", + // using Blocks for backward compatible + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, + timeouts.Opts{Create: true, Delete: true, Update: true}, + ), + }, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + MarkdownDescription: "Resource ID of the cluster.", + 
Computed: true, + }, + "allowed_ip_ranges": schema.SetNestedAttribute{ + Description: "Allowed IP ranges.", + Optional: true, + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cidr_block": schema.StringAttribute{ + Description: "CIDR block", + Required: true, + }, + "description": schema.StringAttribute{ + Description: "Description of CIDR block", + Optional: true, + }, + }, + }, + }, + "cluster_id": schema.StringAttribute{ + MarkdownDescription: "Cluster ID.", + Required: true, + }, + "connection_uri": schema.StringAttribute{ + MarkdownDescription: "Cluster connection URI.", + Computed: true, + }, + "cluster_name": schema.StringAttribute{ + MarkdownDescription: "Name of the cluster.", + Computed: true, + }, + "phase": schema.StringAttribute{ + MarkdownDescription: "Current phase of the cluster.", + Computed: true, + }, + "project_id": schema.StringAttribute{ + MarkdownDescription: "BigAnimal Project ID.", + Required: true, + Validators: []validator.String{ProjectIdValidator()}, + }, + "logs_url": schema.StringAttribute{ + MarkdownDescription: "The URL to find the logs of this cluster.", + Computed: true, + }, + "backup_retention_period": schema.StringAttribute{ + MarkdownDescription: "Backup retention period. For example, \"7d\", \"2w\", or \"3m\".", + Optional: true, + Computed: true, + Validators: []validator.String{BackupRetentionPeriodValidator()}, + }, + "cloud_provider": schema.StringAttribute{ + Description: "Cloud provider. For example, \"aws\" or \"bah:aws\".", + Computed: true, + }, + "pg_type": schema.StringAttribute{ + MarkdownDescription: "Postgres type. 
For example, \"epas\" or \"pgextended\".", + Computed: true, + Validators: []validator.String{stringvalidator.OneOf("epas", "pgextended", "postgres")}, + }, + "first_recoverability_point_at": schema.StringAttribute{ + MarkdownDescription: "Earliest backup recover time.", + Computed: true, + }, + "pg_version": schema.StringAttribute{ + MarkdownDescription: "Postgres version. For example 16", + Computed: true, + }, + "private_networking": schema.BoolAttribute{ + MarkdownDescription: "Is private networking enabled.", + Computed: true, + }, + "password": schema.StringAttribute{ + MarkdownDescription: "Password for the user edb_admin. It must be 12 characters or more.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + MarkdownDescription: "Cluster creation time.", + Computed: true, + }, + "region": schema.StringAttribute{ + MarkdownDescription: "Region to deploy the cluster. See [Supported regions](https://www.enterprisedb.com/docs/biganimal/latest/overview/03a_region_support/) for supported regions.", + Computed: true, + }, + "instance_type": schema.StringAttribute{ + MarkdownDescription: "Instance type. 
For example, \"azure:Standard_D2s_v3\", \"aws:c5.large\" or \"gcp:e2-highcpu-4\".", + Computed: true, + }, + "resizing_pvc": schema.ListAttribute{ + MarkdownDescription: "Resizing PVC.", + Computed: true, + ElementType: types.StringType, + }, + "metrics_url": schema.StringAttribute{ + MarkdownDescription: "The URL to find the metrics of this cluster.", + Computed: true, + }, + "csp_auth": schema.BoolAttribute{ + MarkdownDescription: "Is authentication handled by the cloud service provider.", + Optional: true, + Computed: true, + }, + "maintenance_window": schema.SingleNestedAttribute{ + MarkdownDescription: "Custom maintenance window.", + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "is_enabled": schema.BoolAttribute{ + MarkdownDescription: "Is maintenance window enabled.", + Required: true, + }, + "start_day": schema.Int64Attribute{ + MarkdownDescription: "The day of week, 0 represents Sunday, 1 is Monday, and so on.", + Optional: true, + Computed: true, + Validators: []validator.Int64{int64validator.Between(0, 6)}, + }, + "start_time": schema.StringAttribute{ + MarkdownDescription: "Start time. \"hh:mm\", for example: \"23:59\".", + Optional: true, + Computed: true, + Validators: []validator.String{startTimeValidator()}, + }, + }, + }, + "service_account_ids": schema.SetAttribute{ + MarkdownDescription: "A Google Cloud Service Account is used for logs. If you leave this blank, then you will be unable to access log details for this cluster. Required when cluster is deployed on BigAnimal's cloud account.", + Optional: true, + Computed: true, + ElementType: types.StringType, + }, + + "pe_allowed_principal_ids": schema.SetAttribute{ + MarkdownDescription: "Cloud provider subscription/account ID, need to be specified when cluster is deployed on BigAnimal's cloud account.", + Optional: true, + Computed: true, + ElementType: types.StringType, + }, + "pause": schema.BoolAttribute{ + MarkdownDescription: "Pause cluster. 
If true it will put the cluster on pause and set the phase as paused, if false it will resume the cluster and set the phase as healthy. " + + "Pausing a cluster allows you to save on compute costs without losing data or cluster configuration settings. " + + "While paused, clusters aren't upgraded or patched, but changes are applied when the cluster resumes. " + + "Pausing a high availability cluster shuts down all cluster nodes", + Optional: true, + }, + }, + } +} + +func (r *analyticsClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data analyticsClusterDataSourceModel + diags := req.Config.Get(ctx, &data.analyticsClusterResourceModel) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + if err := read(ctx, r.client, &data.analyticsClusterResourceModel); err != nil { + if !appendDiagFromBAErr(err, &resp.Diagnostics) { + resp.Diagnostics.AddError("Error reading cluster", err.Error()) + } + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, data.analyticsClusterResourceModel)...) +} + +func NewAnalyticsClusterDataSource() datasource.DataSource { + return &analyticsClusterDataSource{} +} diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index 048a9a8b..6ee2472e 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -194,6 +194,7 @@ func (b bigAnimalProvider) DataSources(ctx context.Context) []func() datasource. 
NewClusterDataSource, NewPgdDataSource, NewRegionsDataSource, + NewAnalyticsClusterDataSource, } } @@ -203,5 +204,6 @@ func (b bigAnimalProvider) Resources(ctx context.Context) []func() resource.Reso NewPgdResource, NewRegionResource, NewClusterResource, + NewAnalyticsClusterResource, } } diff --git a/pkg/provider/resource_analytics_cluster.go b/pkg/provider/resource_analytics_cluster.go new file mode 100644 index 00000000..636e9708 --- /dev/null +++ b/pkg/provider/resource_analytics_cluster.go @@ -0,0 +1,645 @@ +package provider + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/api" + "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models" + commonApi "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models/common/api" + commonTerraform "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models/common/terraform" + "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/plan_modifier" + "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/utils" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + 
"github.com/hashicorp/terraform-plugin-framework/types" +) + +var ( + _ resource.Resource = &analyticsClusterResource{} + _ resource.ResourceWithConfigure = &analyticsClusterResource{} +) + +type analyticsClusterResourceModel struct { + ID types.String `tfsdk:"id"` + CspAuth types.Bool `tfsdk:"csp_auth"` + Region types.String `tfsdk:"region"` + InstanceType types.String `tfsdk:"instance_type"` + ResizingPvc types.List `tfsdk:"resizing_pvc"` + MetricsUrl *string `tfsdk:"metrics_url"` + ClusterId *string `tfsdk:"cluster_id"` + Phase *string `tfsdk:"phase"` + ConnectionUri types.String `tfsdk:"connection_uri"` + ClusterName types.String `tfsdk:"cluster_name"` + FirstRecoverabilityPointAt *string `tfsdk:"first_recoverability_point_at"` + ProjectId string `tfsdk:"project_id"` + LogsUrl *string `tfsdk:"logs_url"` + BackupRetentionPeriod types.String `tfsdk:"backup_retention_period"` + CloudProvider types.String `tfsdk:"cloud_provider"` + PgType types.String `tfsdk:"pg_type"` + Password types.String `tfsdk:"password"` + PgVersion types.String `tfsdk:"pg_version"` + PrivateNetworking types.Bool `tfsdk:"private_networking"` + AllowedIpRanges []AllowedIpRangesResourceModel `tfsdk:"allowed_ip_ranges"` + CreatedAt types.String `tfsdk:"created_at"` + MaintenanceWindow *commonTerraform.MaintenanceWindow `tfsdk:"maintenance_window"` + ServiceAccountIds types.Set `tfsdk:"service_account_ids"` + PeAllowedPrincipalIds types.Set `tfsdk:"pe_allowed_principal_ids"` + Pause types.Bool `tfsdk:"pause"` + + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +func (r analyticsClusterResourceModel) projectId() string { + return r.ProjectId +} + +func (r analyticsClusterResourceModel) clusterId() string { + return *r.ClusterId +} + +type analyticsClusterResource struct { + client *api.ClusterClient +} + +func (r *analyticsClusterResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + r.client = 
req.ProviderData.(*api.API).ClusterClient() +} + +func (r *analyticsClusterResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_analytics_cluster" +} + +func (r *analyticsClusterResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + MarkdownDescription: "The analytics cluster resource is used to manage BigAnimal analytics clusters.", + // using Blocks for backward compatible + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, + timeouts.Opts{Create: true, Delete: true, Update: true}, + ), + }, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + MarkdownDescription: "Resource ID of the cluster.", + Computed: true, + PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, + }, + "allowed_ip_ranges": schema.SetNestedAttribute{ + Description: "Allowed IP ranges.", + Optional: true, + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cidr_block": schema.StringAttribute{ + Description: "CIDR block", + Required: true, + }, + "description": schema.StringAttribute{ + Description: "Description of CIDR block", + Optional: true, + }, + }, + }, + PlanModifiers: []planmodifier.Set{setplanmodifier.UseStateForUnknown()}, + }, + "cluster_id": schema.StringAttribute{ + MarkdownDescription: "Cluster ID.", + Computed: true, + PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, + }, + "connection_uri": schema.StringAttribute{ + MarkdownDescription: "Cluster connection URI.", + Computed: true, + PlanModifiers: []planmodifier.String{plan_modifier.CustomConnection()}, + }, + "cluster_name": schema.StringAttribute{ + MarkdownDescription: "Name of the cluster.", + Required: true, + }, + "phase": schema.StringAttribute{ + MarkdownDescription: "Current phase of the cluster.", 
+ Computed: true, + PlanModifiers: []planmodifier.String{plan_modifier.CustomPhaseForUnknown()}, + }, + "project_id": schema.StringAttribute{ + MarkdownDescription: "BigAnimal Project ID.", + Required: true, + Validators: []validator.String{ProjectIdValidator()}, + }, + "logs_url": schema.StringAttribute{ + MarkdownDescription: "The URL to find the logs of this cluster.", + Computed: true, + PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, + }, + "backup_retention_period": schema.StringAttribute{ + MarkdownDescription: "Backup retention period. For example, \"7d\", \"2w\", or \"3m\".", + Optional: true, + Computed: true, + Validators: []validator.String{BackupRetentionPeriodValidator()}, + PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, + }, + "cloud_provider": schema.StringAttribute{ + Description: "Cloud provider. For example, \"aws\" or \"bah:aws\".", + Required: true, + }, + "pg_type": schema.StringAttribute{ + MarkdownDescription: "Postgres type. For example, \"epas\" or \"pgextended\".", + Required: true, + Validators: []validator.String{stringvalidator.OneOf("epas", "pgextended", "postgres")}, + }, + "first_recoverability_point_at": schema.StringAttribute{ + MarkdownDescription: "Earliest backup recover time.", + Computed: true, + PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, + }, + "pg_version": schema.StringAttribute{ + MarkdownDescription: "Postgres version. For example 16", + Required: true, + }, + "private_networking": schema.BoolAttribute{ + MarkdownDescription: "Is private networking enabled.", + Optional: true, + }, + "password": schema.StringAttribute{ + MarkdownDescription: "Password for the user edb_admin. 
It must be 12 characters or more.", + Required: true, + }, + "created_at": schema.StringAttribute{ + MarkdownDescription: "Cluster creation time.", + Computed: true, + PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, + }, + "region": schema.StringAttribute{ + MarkdownDescription: "Region to deploy the cluster. See [Supported regions](https://www.enterprisedb.com/docs/biganimal/latest/overview/03a_region_support/) for supported regions.", + Required: true, + }, + "instance_type": schema.StringAttribute{ + MarkdownDescription: "Instance type. For example, \"azure:Standard_D2s_v3\", \"aws:c5.large\" or \"gcp:e2-highcpu-4\".", + Required: true, + }, + "resizing_pvc": schema.ListAttribute{ + MarkdownDescription: "Resizing PVC.", + Computed: true, + PlanModifiers: []planmodifier.List{listplanmodifier.UseStateForUnknown()}, + ElementType: types.StringType, + }, + "metrics_url": schema.StringAttribute{ + MarkdownDescription: "The URL to find the metrics of this cluster.", + Computed: true, + PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, + }, + "csp_auth": schema.BoolAttribute{ + MarkdownDescription: "Is authentication handled by the cloud service provider.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Bool{boolplanmodifier.UseStateForUnknown()}, + }, + "maintenance_window": schema.SingleNestedAttribute{ + MarkdownDescription: "Custom maintenance window.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{plan_modifier.MaintenanceWindowForUnknown()}, + Attributes: map[string]schema.Attribute{ + "is_enabled": schema.BoolAttribute{ + MarkdownDescription: "Is maintenance window enabled.", + Required: true, + }, + "start_day": schema.Int64Attribute{ + MarkdownDescription: "The day of week, 0 represents Sunday, 1 is Monday, and so on.", + Optional: true, + Computed: true, + Validators: []validator.Int64{int64validator.Between(0, 6)}, + }, + "start_time": 
schema.StringAttribute{ + MarkdownDescription: "Start time. \"hh:mm\", for example: \"23:59\".", + Optional: true, + Computed: true, + Validators: []validator.String{startTimeValidator()}, + }, + }, + }, + "service_account_ids": schema.SetAttribute{ + MarkdownDescription: "A Google Cloud Service Account is used for logs. If you leave this blank, then you will be unable to access log details for this cluster. Required when cluster is deployed on BigAnimal's cloud account.", + Optional: true, + Computed: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Set{setplanmodifier.UseStateForUnknown()}, + }, + + "pe_allowed_principal_ids": schema.SetAttribute{ + MarkdownDescription: "Cloud provider subscription/account ID, need to be specified when cluster is deployed on BigAnimal's cloud account.", + Optional: true, + Computed: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Set{setplanmodifier.UseStateForUnknown()}, + }, + "pause": schema.BoolAttribute{ + MarkdownDescription: "Pause cluster. If true it will put the cluster on pause and set the phase as paused, if false it will resume the cluster and set the phase as healthy. " + + "Pausing a cluster allows you to save on compute costs without losing data or cluster configuration settings. " + + "While paused, clusters aren't upgraded or patched, but changes are applied when the cluster resumes. " + + "Pausing a high availability cluster shuts down all cluster nodes", + Optional: true, + PlanModifiers: []planmodifier.Bool{boolplanmodifier.UseStateForUnknown()}, + }, + }, + } +} + +func (r *analyticsClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var config analyticsClusterResourceModel + diags := req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + clusterModel, err := generateGenericAnalyticsClusterModel(ctx, r.client, config) + if err != nil { + if !appendDiagFromBAErr(err, &resp.Diagnostics) { + resp.Diagnostics.AddError("Error creating cluster", err.Error()) + } + return + } + + // consume cluster create with analytics request + clusterId, err := r.client.Create(ctx, config.ProjectId, clusterModel) + if err != nil { + if !appendDiagFromBAErr(err, &resp.Diagnostics) { + resp.Diagnostics.AddError("Error creating cluster API request", err.Error()) + } + return + } + + config.ClusterId = &clusterId + + timeout, diagnostics := config.Timeouts.Create(ctx, time.Minute*60) + resp.Diagnostics.Append(diagnostics...) + if resp.Diagnostics.HasError() { + return + } + + // keep retrying until cluster is healthy + if err := ensureClusterIsHealthy(ctx, r.client, config, timeout); err != nil { + if !appendDiagFromBAErr(err, &resp.Diagnostics) { + resp.Diagnostics.AddError("Error waiting for the cluster to be ready", err.Error()) + } + return + } + + if config.Pause.ValueBool() { + _, err = r.client.ClusterPause(ctx, config.ProjectId, *config.ClusterId) + if err != nil { + if !appendDiagFromBAErr(err, &resp.Diagnostics) { + resp.Diagnostics.AddError("Error pausing cluster API request", err.Error()) + } + return + } + + // keep retrying until cluster is paused + if err := ensureClusterIsPaused(ctx, r.client, config, timeout); err != nil { + if !appendDiagFromBAErr(err, &resp.Diagnostics) { + resp.Diagnostics.AddError("Error waiting for the cluster to pause", err.Error()) + } + return + } + } + + // after cluster is in the correct state (healthy/paused) then get the cluster and save into state + if err := read(ctx, r.client, &config); err != nil { + if !appendDiagFromBAErr(err, &resp.Diagnostics) { + resp.Diagnostics.AddError("Error reading cluster", err.Error()) + } + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, config)...) 
+} + +func generateGenericAnalyticsClusterModel(ctx context.Context, client *api.ClusterClient, clusterResource analyticsClusterResourceModel) (models.Cluster, error) { + cluster := models.Cluster{ + ClusterType: utils.ToPointer("analytical"), + ClusterName: clusterResource.ClusterName.ValueStringPointer(), + Password: clusterResource.Password.ValueStringPointer(), + Provider: &models.Provider{CloudProviderId: clusterResource.CloudProvider.ValueString()}, + Region: &models.Region{Id: clusterResource.Region.ValueString()}, + InstanceType: &models.InstanceType{InstanceTypeId: clusterResource.InstanceType.ValueString()}, + PgType: &models.PgType{PgTypeId: clusterResource.PgType.ValueString()}, + PgVersion: &models.PgVersion{PgVersionId: clusterResource.PgVersion.ValueString()}, + CSPAuth: clusterResource.CspAuth.ValueBoolPointer(), + PrivateNetworking: clusterResource.PrivateNetworking.ValueBoolPointer(), + BackupRetentionPeriod: clusterResource.BackupRetentionPeriod.ValueStringPointer(), + } + + cluster.ClusterId = nil + cluster.PgConfig = nil + + allowedIpRanges := []models.AllowedIpRange{} + for _, ipRange := range clusterResource.AllowedIpRanges { + allowedIpRanges = append(allowedIpRanges, models.AllowedIpRange{ + CidrBlock: ipRange.CidrBlock, + Description: ipRange.Description.ValueString(), + }) + } + cluster.AllowedIpRanges = &allowedIpRanges + + if clusterResource.MaintenanceWindow != nil { + cluster.MaintenanceWindow = &commonApi.MaintenanceWindow{ + IsEnabled: clusterResource.MaintenanceWindow.IsEnabled, + StartTime: clusterResource.MaintenanceWindow.StartTime.ValueStringPointer(), + } + + if !clusterResource.MaintenanceWindow.StartDay.IsNull() && !clusterResource.MaintenanceWindow.StartDay.IsUnknown() { + cluster.MaintenanceWindow.StartDay = utils.ToPointer(float64(*clusterResource.MaintenanceWindow.StartDay.ValueInt64Pointer())) + } + } + + if strings.Contains(clusterResource.CloudProvider.ValueString(), "bah") { + clusterRscCSP := 
clusterResource.CloudProvider + clusterRscPrincipalIds := clusterResource.PeAllowedPrincipalIds + clusterRscSvcAcntIds := clusterResource.ServiceAccountIds + + // If there is an existing Principal Account Id for that Region, use that one. + pids, err := client.GetPeAllowedPrincipalIds(ctx, clusterResource.ProjectId, clusterRscCSP.ValueString(), clusterResource.Region.ValueString()) + if err != nil { + return models.Cluster{}, err + } + cluster.PeAllowedPrincipalIds = utils.ToPointer(pids.Data) + + // If there is no existing value, user should provide one + if cluster.PeAllowedPrincipalIds != nil && len(*cluster.PeAllowedPrincipalIds) == 0 { + // Here, we prefer to create a non-nil zero length slice, because we need empty JSON array + // while encoding JSON objects + // For more info, please visit https://github.com/golang/go/wiki/CodeReviewComments#declaring-empty-slices + plist := []string{} + for _, peId := range clusterRscPrincipalIds.Elements() { + plist = append(plist, strings.Replace(peId.String(), "\"", "", -1)) + } + + cluster.PeAllowedPrincipalIds = utils.ToPointer(plist) + } + + if clusterRscCSP.ValueString() == "bah:gcp" { + // If there is an existing Service Account Id for that Region, use that one. + sids, _ := client.GetServiceAccountIds(ctx, clusterResource.ProjectId, clusterResource.CloudProvider.ValueString(), clusterResource.Region.ValueString()) + cluster.ServiceAccountIds = utils.ToPointer(sids.Data) + + // If there is no existing value, user should provide one + if cluster.ServiceAccountIds != nil && len(*cluster.ServiceAccountIds) == 0 { + // Here, we prefer to create a non-nil zero length slice, because we need empty JSON array + // while encoding JSON objects. 
+				// For more info, please visit https://github.com/golang/go/wiki/CodeReviewComments#declaring-empty-slices
+				slist := []string{}
+				for _, saId := range clusterRscSvcAcntIds.Elements() {
+					slist = append(slist, strings.ReplaceAll(saId.String(), "\"", ""))
+				}
+
+				cluster.ServiceAccountIds = utils.ToPointer(slist)
+			}
+		}
+	}
+
+	return cluster, nil
+}
+
+// read refreshes tfClusterResource from the BigAnimal API: it fetches the cluster
+// and its connection string, then copies every tracked attribute back into the
+// Terraform resource model. ClusterId must be non-nil.
+func read(ctx context.Context, client *api.ClusterClient, tfClusterResource *analyticsClusterResourceModel) error {
+	apiCluster, err := client.Read(ctx, tfClusterResource.ProjectId, *tfClusterResource.ClusterId)
+	if err != nil {
+		return err
+	}
+
+	connection, err := client.ConnectionString(ctx, tfClusterResource.ProjectId, *tfClusterResource.ClusterId)
+	if err != nil {
+		return err
+	}
+
+	// Composite Terraform ID: "<project_id>/<cluster_id>".
+	tfClusterResource.ID = types.StringValue(fmt.Sprintf("%s/%s", tfClusterResource.ProjectId, *tfClusterResource.ClusterId))
+	tfClusterResource.ClusterId = apiCluster.ClusterId
+	tfClusterResource.ClusterName = types.StringPointerValue(apiCluster.ClusterName)
+	tfClusterResource.Phase = apiCluster.Phase
+	tfClusterResource.CloudProvider = types.StringValue(apiCluster.Provider.CloudProviderId)
+	tfClusterResource.Region = types.StringValue(apiCluster.Region.Id)
+	tfClusterResource.InstanceType = types.StringValue(apiCluster.InstanceType.InstanceTypeId)
+	tfClusterResource.ResizingPvc = StringSliceToList(apiCluster.ResizingPvc)
+	tfClusterResource.ConnectionUri = types.StringValue(connection.PgUri)
+	tfClusterResource.CspAuth = types.BoolPointerValue(apiCluster.CSPAuth)
+	tfClusterResource.LogsUrl = apiCluster.LogsUrl
+	tfClusterResource.MetricsUrl = apiCluster.MetricsUrl
+	tfClusterResource.BackupRetentionPeriod = types.StringPointerValue(apiCluster.BackupRetentionPeriod)
+	tfClusterResource.PgVersion = types.StringValue(apiCluster.PgVersion.PgVersionId)
+	tfClusterResource.PgType = types.StringValue(apiCluster.PgType.PgTypeId)
+	tfClusterResource.PrivateNetworking = 
types.BoolPointerValue(apiCluster.PrivateNetworking)
+
+	if apiCluster.FirstRecoverabilityPointAt != nil {
+		firstPointAt := apiCluster.FirstRecoverabilityPointAt.String()
+		tfClusterResource.FirstRecoverabilityPointAt = &firstPointAt
+	}
+
+	// Rebuild the IP allow-list from scratch so stale entries are dropped.
+	tfClusterResource.AllowedIpRanges = []AllowedIpRangesResourceModel{}
+	if allowedIpRanges := apiCluster.AllowedIpRanges; allowedIpRanges != nil {
+		for _, ipRange := range *allowedIpRanges {
+			tfClusterResource.AllowedIpRanges = append(tfClusterResource.AllowedIpRanges, AllowedIpRangesResourceModel{
+				CidrBlock:   ipRange.CidrBlock,
+				Description: types.StringValue(ipRange.Description),
+			})
+		}
+	}
+
+	if pt := apiCluster.CreatedAt; pt != nil {
+		tfClusterResource.CreatedAt = types.StringValue(pt.String())
+	}
+
+	if apiCluster.MaintenanceWindow != nil {
+		// FIXME(review): StartDay is dereferenced unconditionally below — this panics
+		// if the API ever returns a maintenance window without a start day. Confirm
+		// StartDay is always populated, or nil-check before dereferencing.
+		tfClusterResource.MaintenanceWindow = &commonTerraform.MaintenanceWindow{
+			IsEnabled: apiCluster.MaintenanceWindow.IsEnabled,
+			StartDay:  types.Int64PointerValue(utils.ToPointer(int64(*apiCluster.MaintenanceWindow.StartDay))),
+			StartTime: types.StringPointerValue(apiCluster.MaintenanceWindow.StartTime),
+		}
+	}
+
+	if apiCluster.PeAllowedPrincipalIds != nil {
+		tfClusterResource.PeAllowedPrincipalIds = StringSliceToSet(utils.ToValue(&apiCluster.PeAllowedPrincipalIds))
+	}
+
+	if apiCluster.ServiceAccountIds != nil {
+		tfClusterResource.ServiceAccountIds = StringSliceToSet(utils.ToValue(&apiCluster.ServiceAccountIds))
+	}
+
+	return nil
+}
+
+// Read implements resource.Resource: it loads the prior state, refreshes it from
+// the BigAnimal API via read, and writes the result back to Terraform state.
+func (r *analyticsClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+	var state analyticsClusterResourceModel
+	diags := req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	if err := read(ctx, r.client, &state); err != nil {
+		if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+			resp.Diagnostics.AddError("Error reading cluster", err.Error())
+		}
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, state)...)
+}
+
+// Update implements resource.Resource. Pause/resume semantics:
+//   - cluster paused,  plan pause = true:  error — the user must set pause = false to update
+//   - cluster paused,  plan pause = false: resume the cluster, then apply the update
+//   - cluster healthy, plan pause = true:  apply the update, then pause
+//   - cluster healthy, plan pause = false: apply the update
+func (r *analyticsClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+	var plan analyticsClusterResourceModel
+
+	diags := req.Plan.Get(ctx, &plan)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Read the update timeout only after the plan has been populated; reading it
+	// from the zero-value model would silently ignore a user-configured timeout.
+	timeout, diagnostics := plan.Timeouts.Update(ctx, time.Minute*60)
+	resp.Diagnostics.Append(diagnostics...)
+
+	var state analyticsClusterResourceModel
+	diags = req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	if *state.Phase != models.PHASE_HEALTHY && *state.Phase != models.PHASE_PAUSED {
+		resp.Diagnostics.AddError("Cluster not ready please wait", "Cluster not ready for update operation please wait")
+		return
+	}
+
+	if *state.Phase == models.PHASE_PAUSED {
+		if plan.Pause.ValueBool() {
+			resp.Diagnostics.AddError("Error cannot update paused cluster", "cannot update paused cluster, please set pause = false to resume cluster")
+			return
+		}
+
+		if !plan.Pause.ValueBool() {
+			_, err := r.client.ClusterResume(ctx, plan.ProjectId, *plan.ClusterId)
+			if err != nil {
+				if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+					resp.Diagnostics.AddError("Error resuming cluster API request", err.Error())
+				}
+				return
+			}
+
+			if err := ensureClusterIsHealthy(ctx, r.client, plan, timeout); err != nil {
+				if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+					resp.Diagnostics.AddError("Error waiting for the cluster to be ready", err.Error())
+				}
+				return
+			}
+		}
+	}
+
+	clusterModel, err := generateGenericAnalyticsClusterModel(ctx, r.client.ClusterClient(), plan)
+	if err != nil {
+		if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+			
resp.Diagnostics.AddError("Error updating cluster", err.Error())
+		}
+		return
+	}
+
+	_, err = r.client.Update(ctx, &clusterModel, plan.ProjectId, *plan.ClusterId)
+	if err != nil {
+		if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+			resp.Diagnostics.AddError("Error updating cluster API request", err.Error())
+		}
+		return
+	}
+
+	// sleep after update operation as API can incorrectly respond with healthy state when checking the phase
+	// this is possibly a bug in the API
+	time.Sleep(20 * time.Second)
+
+	if err := ensureClusterIsHealthy(ctx, r.client, plan, timeout); err != nil {
+		if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+			resp.Diagnostics.AddError("Error waiting for the cluster to be ready", err.Error())
+		}
+		return
+	}
+
+	// Pause was requested against a healthy cluster: pause after the update lands.
+	if plan.Pause.ValueBool() {
+		_, err = r.client.ClusterPause(ctx, plan.ProjectId, *plan.ClusterId)
+		if err != nil {
+			if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+				resp.Diagnostics.AddError("Error pausing cluster API request", err.Error())
+			}
+			return
+		}
+
+		if err := ensureClusterIsPaused(ctx, r.client, plan, timeout); err != nil {
+			if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+				resp.Diagnostics.AddError("Error waiting for the cluster to pause", err.Error())
+			}
+			return
+		}
+	}
+
+	// Refresh computed attributes from the API before persisting the new state.
+	if err := read(ctx, r.client, &plan); err != nil {
+		if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+			resp.Diagnostics.AddError("Error reading cluster", err.Error())
+		}
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
+}
+
+// Delete implements resource.Resource by deleting the cluster via the BigAnimal API.
+func (r *analyticsClusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	var state analyticsClusterResourceModel
+	diags := req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	err := r.client.Delete(ctx, state.ProjectId, *state.ClusterId)
+	if err != nil {
+		if !appendDiagFromBAErr(err, &resp.Diagnostics) {
+			resp.Diagnostics.AddError("Error deleting cluster", err.Error())
+		}
+		return
+	}
+}
+
+// ImportState supports "terraform import" with an ID of the form
+// "<project_id>/<cluster_id>"; both parts must be non-empty.
+func (r *analyticsClusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	idParts := strings.Split(req.ID, "/")
+	if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
+		resp.Diagnostics.AddError(
+			"Unexpected Import Identifier",
+			fmt.Sprintf("Expected import identifier with format: project_id/cluster_id. Got: %q", req.ID),
+		)
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("cluster_id"), idParts[1])...)
+}
+
+// NewAnalyticsClusterResource returns a new analytics cluster resource implementation.
+func NewAnalyticsClusterResource() resource.Resource {
+	return &analyticsClusterResource{}
+}
diff --git a/pkg/provider/resource_cluster.go b/pkg/provider/resource_cluster.go
index da23e066..3b80d91b 100644
--- a/pkg/provider/resource_cluster.go
+++ b/pkg/provider/resource_cluster.go
@@ -113,6 +113,19 @@ type PgBouncerSettingsModel struct {
 	Value string `tfsdk:"value"`
 }
 
+func (c ClusterResourceModel) projectId() string {
+	return c.ProjectId
+}
+
+func (c ClusterResourceModel) clusterId() string {
+	return *c.ClusterId
+}
+
+type retryClusterResourceModel interface {
+	projectId() string
+	clusterId() string
+}
+
 type clusterResource struct {
 	client *api.ClusterClient
 }
@@ -261,7 +274,6 @@ func (c *clusterResource) Schema(ctx context.Context, req resource.SchemaRequest
 				plan_modifier.CustomPhaseForUnknown(),
 			},
 		},
-
 		"ro_connection_uri": schema.StringAttribute{
 			MarkdownDescription: "Cluster read-only connection URI. 
Only available for high availability clusters.", Computed: true, @@ -506,7 +518,7 @@ func (c *clusterResource) Create(ctx context.Context, req resource.CreateRequest return } - if err := c.ensureClusterIsHealthy(ctx, config, timeout); err != nil { + if err := ensureClusterIsHealthy(ctx, c.client, config, timeout); err != nil { if !appendDiagFromBAErr(err, &resp.Diagnostics) { resp.Diagnostics.AddError("Error waiting for the cluster is ready ", err.Error()) } @@ -522,7 +534,7 @@ func (c *clusterResource) Create(ctx context.Context, req resource.CreateRequest return } - if err := c.ensureClusterIsPaused(ctx, config, timeout); err != nil { + if err := ensureClusterIsPaused(ctx, c.client, config, timeout); err != nil { if !appendDiagFromBAErr(err, &resp.Diagnostics) { resp.Diagnostics.AddError("Error waiting for the cluster to pause", err.Error()) } @@ -601,7 +613,7 @@ func (c *clusterResource) Update(ctx context.Context, req resource.UpdateRequest return } - if err := c.ensureClusterIsHealthy(ctx, plan, timeout); err != nil { + if err := ensureClusterIsHealthy(ctx, c.client, plan, timeout); err != nil { if !appendDiagFromBAErr(err, &resp.Diagnostics) { resp.Diagnostics.AddError("Error waiting for the cluster is ready ", err.Error()) } @@ -630,7 +642,7 @@ func (c *clusterResource) Update(ctx context.Context, req resource.UpdateRequest // this is possibly a bug in the API time.Sleep(20 * time.Second) - if err := c.ensureClusterIsHealthy(ctx, plan, timeout); err != nil { + if err := ensureClusterIsHealthy(ctx, c.client, plan, timeout); err != nil { if !appendDiagFromBAErr(err, &resp.Diagnostics) { resp.Diagnostics.AddError("Error waiting for the cluster is ready ", err.Error()) } @@ -646,7 +658,7 @@ func (c *clusterResource) Update(ctx context.Context, req resource.UpdateRequest return } - if err := c.ensureClusterIsPaused(ctx, plan, timeout); err != nil { + if err := ensureClusterIsPaused(ctx, c.client, plan, timeout); err != nil { if !appendDiagFromBAErr(err, 
&resp.Diagnostics) { resp.Diagnostics.AddError("Error waiting for the cluster to pause", err.Error()) } @@ -843,12 +855,12 @@ func (c *clusterResource) read(ctx context.Context, tfClusterResource *ClusterRe return nil } -func (c *clusterResource) ensureClusterIsHealthy(ctx context.Context, cluster ClusterResourceModel, timeout time.Duration) error { +func ensureClusterIsHealthy(ctx context.Context, client *api.ClusterClient, cluster retryClusterResourceModel, timeout time.Duration) error { return retry.RetryContext( ctx, timeout, func() *retry.RetryError { - resp, err := c.client.Read(ctx, cluster.ProjectId, *cluster.ClusterId) + resp, err := client.Read(ctx, cluster.projectId(), cluster.clusterId()) if err != nil { return retry.NonRetryableError(err) } @@ -860,12 +872,12 @@ func (c *clusterResource) ensureClusterIsHealthy(ctx context.Context, cluster Cl }) } -func (c *clusterResource) ensureClusterIsPaused(ctx context.Context, cluster ClusterResourceModel, timeout time.Duration) error { +func ensureClusterIsPaused(ctx context.Context, client *api.ClusterClient, cluster retryClusterResourceModel, timeout time.Duration) error { return retry.RetryContext( ctx, timeout, func() *retry.RetryError { - resp, err := c.client.Read(ctx, cluster.ProjectId, *cluster.ClusterId) + resp, err := client.Read(ctx, cluster.projectId(), cluster.clusterId()) if err != nil { return retry.NonRetryableError(err) }