diff --git a/Dockerfile b/Dockerfile index e4425bb4..b07a41d4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ -ARG TERRAFORM_VERSION=1.8.2 -ARG AZURECLI_VERSION=2.59.0 +ARG TERRAFORM_VERSION=1.9.6 +ARG AZURECLI_VERSION=2.64.0 FROM hashicorp/terraform:$TERRAFORM_VERSION as terraform FROM mcr.microsoft.com/azure-cli:$AZURECLI_VERSION diff --git a/README.md b/README.md index 6e5167e6..0c11f1a2 100644 --- a/README.md +++ b/README.md @@ -57,10 +57,10 @@ This project supports two options for running Terraform scripts: Access to an **Azure Subscription** and an [**Identity**](./docs/user/TerraformAzureAuthentication.md) with the *Contributor* role are required. #### Terraform Requirements: -- [Terraform](https://www.terraform.io/downloads.html) - v1.8.2 +- [Terraform](https://www.terraform.io/downloads.html) - v1.9.6 - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) - v1.29.7 - [jq](https://stedolan.github.io/jq/) - v1.6 -- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure) - (optional - useful as an alternative to the Azure Portal) - v2.59.0 +- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure) - (optional - useful as an alternative to the Azure Portal) - v2.64.0 #### Docker Requirements: - [Docker](https://docs.docker.com/get-docker/) diff --git a/container-structure-test.yaml b/container-structure-test.yaml index f513c3d8..7491cc8c 100644 --- a/container-structure-test.yaml +++ b/container-structure-test.yaml @@ -17,7 +17,7 @@ commandTests: - name: "terraform version" command: "terraform" args: ["--version"] - expectedOutput: ["Terraform v1.8.2"] + expectedOutput: ["Terraform v1.9.6"] - name: "python version" command: "python3" args: ["--version"] @@ -29,7 +29,7 @@ commandTests: - -c - | az version -o tsv - expectedOutput: ["2.59.0\t2.59.0\t1.1.0"] + expectedOutput: ["2.64.0\t2.64.0\t1.1.0"] metadataTest: workdir: "/viya4-iac-azure" diff --git a/docs/CONFIG-VARS.md b/docs/CONFIG-VARS.md index 823a0ec2..94cba51e 100644 ---
a/docs/CONFIG-VARS.md +++ b/docs/CONFIG-VARS.md @@ -127,14 +127,14 @@ The default values for the `subnets` variable are as follows: aks = { "prefixes": ["192.168.0.0/23"], "service_endpoints": ["Microsoft.Sql"], - "private_endpoint_network_policies_enabled": false, + "private_endpoint_network_policies": "Disabled", "private_link_service_network_policies_enabled": false, "service_delegations": {}, } misc = { "prefixes": ["192.168.2.0/24"], "service_endpoints": ["Microsoft.Sql"], - "private_endpoint_network_policies_enabled": false, + "private_endpoint_network_policies": "Disabled", "private_link_service_network_policies_enabled": false, "service_delegations": {}, } @@ -142,7 +142,7 @@ The default values for the `subnets` variable are as follows: netapp = { "prefixes": ["192.168.3.0/24"], "service_endpoints": [], - "private_endpoint_network_policies_enabled": false, + "private_endpoint_network_policies": "Disabled", "private_link_service_network_policies_enabled": false, "service_delegations": { netapp = { diff --git a/examples/sample-input-postgres.tfvars b/examples/sample-input-postgres.tfvars index a5d18879..e32a51c7 100644 --- a/examples/sample-input-postgres.tfvars +++ b/examples/sample-input-postgres.tfvars @@ -43,21 +43,21 @@ postgres_servers = { # aks = { # "prefixes" : ["192.168.0.0/23"], # "service_endpoints" : ["Microsoft.Sql"], -# "private_endpoint_network_policies_enabled" : true, +# "private_endpoint_network_policies" : "Enabled", # "private_link_service_network_policies_enabled" : false, # "service_delegations" : {}, # } # misc = { # "prefixes" : ["192.168.2.0/24"], # "service_endpoints" : ["Microsoft.Sql"], -# "private_endpoint_network_policies_enabled" : true, +# "private_endpoint_network_policies" : "Enabled", # "private_link_service_network_policies_enabled" : false, # "service_delegations" : {}, # } # netapp = { # "prefixes" : ["192.168.3.0/24"], # "service_endpoints" : [], -# "private_endpoint_network_policies_enabled" : false, +# 
"private_endpoint_network_policies" : "Disabled", # "private_link_service_network_policies_enabled" : false, # "service_delegations" : { # netapp = { @@ -69,7 +69,7 @@ postgres_servers = { # postgresql = { # "prefixes": ["192.168.4.0/24"], # "service_endpoints": ["Microsoft.Sql"], -# "private_endpoint_network_policies_enabled": true, +# "private_endpoint_network_policies": "Enabled", # "private_link_service_network_policies_enabled": false, # "service_delegations": { # flexpostgres = { diff --git a/examples/sample-input-singlestore.tfvars b/examples/sample-input-singlestore.tfvars index 6ef6f078..21a7ae60 100644 --- a/examples/sample-input-singlestore.tfvars +++ b/examples/sample-input-singlestore.tfvars @@ -133,14 +133,14 @@ subnets = { aks = { "prefixes": ["192.168.0.0/21"], "service_endpoints": ["Microsoft.Sql"], - "private_endpoint_network_policies_enabled": false, + "private_endpoint_network_policies": "Disabled", "private_link_service_network_policies_enabled": false, "service_delegations": {}, } misc = { "prefixes": ["192.168.8.0/24"], "service_endpoints": ["Microsoft.Sql"], - "private_endpoint_network_policies_enabled": false, + "private_endpoint_network_policies": "Disabled", "private_link_service_network_policies_enabled": false, "service_delegations": {}, } @@ -148,7 +148,7 @@ subnets = { netapp = { "prefixes": ["192.168.9.0/24"], "service_endpoints": [], - "private_endpoint_network_policies_enabled": false, + "private_endpoint_network_policies": "Disabled", "private_link_service_network_policies_enabled": false, "service_delegations": { netapp = { diff --git a/main.tf b/main.tf index 28ee7a23..493819f3 100644 --- a/main.tf +++ b/main.tf @@ -144,7 +144,7 @@ module "aks" { aks_cluster_max_pods = var.default_nodepool_max_pods aks_cluster_os_disk_size = var.default_nodepool_os_disk_size aks_cluster_node_vm_size = var.default_nodepool_vm_type - aks_cluster_enable_host_encryption = var.aks_cluster_enable_host_encryption + aks_cluster_host_encryption_enabled = 
var.aks_cluster_host_encryption_enabled aks_node_disk_encryption_set_id = var.aks_node_disk_encryption_set_id aks_cluster_node_admin = var.node_vm_admin aks_cluster_ssh_public_key = try(file(var.ssh_public_key), "") @@ -159,7 +159,6 @@ module "aks" { aks_network_policy = var.aks_network_policy aks_network_plugin_mode = var.aks_network_plugin_mode aks_dns_service_ip = var.aks_dns_service_ip - aks_docker_bridge_cidr = var.aks_docker_bridge_cidr cluster_egress_type = local.cluster_egress_type aks_pod_cidr = var.aks_pod_cidr aks_service_cidr = var.aks_service_cidr @@ -202,7 +201,7 @@ module "node_pools" { os_disk_size = each.value.os_disk_size # TODO: enable with azurerm v2.37.0 # os_disk_type = each.value.os_disk_type - enable_auto_scaling = each.value.min_nodes == each.value.max_nodes ? false : true + auto_scaling_enabled = each.value.min_nodes == each.value.max_nodes ? false : true node_count = each.value.min_nodes min_nodes = each.value.min_nodes == each.value.max_nodes ? null : each.value.min_nodes max_nodes = each.value.min_nodes == each.value.max_nodes ? null : each.value.max_nodes @@ -212,7 +211,7 @@ module "node_pools" { zones = (var.node_pools_availability_zone == "" || var.node_pools_proximity_placement == true) ? [] : (var.node_pools_availability_zones != null) ? 
var.node_pools_availability_zones : [var.node_pools_availability_zone] proximity_placement_group_id = element(coalescelist(azurerm_proximity_placement_group.proximity[*].id, [""]), 0) orchestrator_version = var.kubernetes_version - enable_host_encryption = var.aks_cluster_enable_host_encryption + host_encryption_enabled = var.aks_cluster_host_encryption_enabled tags = var.tags } diff --git a/modules/aks_node_pool/main.tf b/modules/aks_node_pool/main.tf index 500cd98e..bcf327ff 100755 --- a/modules/aks_node_pool/main.tf +++ b/modules/aks_node_pool/main.tf @@ -4,30 +4,29 @@ # Reference: https://www.terraform.io/docs/providers/azurerm/r/kubernetes_cluster_node_pool.html resource "azurerm_kubernetes_cluster_node_pool" "autoscale_node_pool" { - count = var.enable_auto_scaling ? 1 : 0 + count = var.auto_scaling_enabled ? 1 : 0 name = var.node_pool_name kubernetes_cluster_id = var.aks_cluster_id vnet_subnet_id = var.vnet_subnet_id zones = var.zones fips_enabled = var.fips_enabled - enable_host_encryption = var.enable_host_encryption + host_encryption_enabled = var.host_encryption_enabled proximity_placement_group_id = var.proximity_placement_group_id == "" ? 
null : var.proximity_placement_group_id vm_size = var.machine_type os_disk_size_gb = var.os_disk_size # TODO: enable after azurerm v2.37.0 # os_disk_type = var.os_disk_type - os_type = var.os_type - enable_auto_scaling = var.enable_auto_scaling - # Still in preview, revisit if needed later - https://docs.microsoft.com/en-us/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools-preview - # enable_node_public_ip = var.enable_node_public_ip - node_count = var.node_count - max_count = var.max_nodes - min_count = var.min_nodes - max_pods = var.max_pods - node_labels = var.node_labels - node_taints = var.node_taints - orchestrator_version = var.orchestrator_version - tags = var.tags + os_type = var.os_type + auto_scaling_enabled = var.auto_scaling_enabled + node_public_ip_enabled = var.node_public_ip_enabled + node_count = var.node_count + max_count = var.max_nodes + min_count = var.min_nodes + max_pods = var.max_pods + node_labels = var.node_labels + node_taints = var.node_taints + orchestrator_version = var.orchestrator_version + tags = var.tags lifecycle { ignore_changes = [node_count] @@ -35,20 +34,20 @@ resource "azurerm_kubernetes_cluster_node_pool" "autoscale_node_pool" { } resource "azurerm_kubernetes_cluster_node_pool" "static_node_pool" { - count = var.enable_auto_scaling ? 0 : 1 + count = var.auto_scaling_enabled ? 0 : 1 name = var.node_pool_name kubernetes_cluster_id = var.aks_cluster_id vnet_subnet_id = var.vnet_subnet_id zones = var.zones fips_enabled = var.fips_enabled - enable_host_encryption = var.enable_host_encryption + host_encryption_enabled = var.host_encryption_enabled proximity_placement_group_id = var.proximity_placement_group_id == "" ? 
null : var.proximity_placement_group_id vm_size = var.machine_type os_disk_size_gb = var.os_disk_size # TODO: enable after azurerm v2.37.0 # os_disk_type = var.os_disk_type os_type = var.os_type - enable_auto_scaling = var.enable_auto_scaling + auto_scaling_enabled = var.auto_scaling_enabled node_count = var.node_count max_count = var.max_nodes min_count = var.min_nodes diff --git a/modules/aks_node_pool/variables.tf b/modules/aks_node_pool/variables.tf index e3981b8c..8e27b7a3 100755 --- a/modules/aks_node_pool/variables.tf +++ b/modules/aks_node_pool/variables.tf @@ -23,7 +23,7 @@ variable "fips_enabled" { default = false } -variable "enable_host_encryption" { +variable "host_encryption_enabled" { description = "Enables host encryption on all the nodes in the Node Pool. Changing this forces a new resource to be created." type = bool default = false @@ -65,12 +65,18 @@ variable "node_count" { default = 1 } -variable "enable_auto_scaling" { +variable "auto_scaling_enabled" { description = "Whether to enable auto-scaler." type = bool default = false } +variable "node_public_ip_enabled" { + description = "Should nodes in this Node Pool have a Public IP Address" + type = bool + default = false +} + variable "max_pods" { description = "The maximum number of pods that can run on each agent. Changing this forces a new resource to be created." type = number @@ -116,23 +122,3 @@ variable "proximity_placement_group_id" { type = string default = "" } - -# For future - https://docs.microsoft.com/en-us/azure/aks/spot-node-pool -# -# variable "priority" { -# description = "The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created." 
-# type = string -# default = "Regular" -# } - -# variable "eviction_policy" { -# description = "The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. An Eviction Policy can only be configured when priority is set to Spot" -# type = string -# default = null -# } - -# variable "spot_max_price" { -# description = "The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created." -# type = number -# default = -1 -# } diff --git a/modules/azure_aks/main.tf b/modules/azure_aks/main.tf index c186b26b..3bf19996 100644 --- a/modules/azure_aks/main.tf +++ b/modules/azure_aks/main.tf @@ -17,10 +17,9 @@ resource "azurerm_kubernetes_cluster" "aks" { # https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions # az aks get-versions --location eastus -o table - kubernetes_version = var.kubernetes_version - api_server_authorized_ip_ranges = var.aks_cluster_endpoint_public_access_cidrs - private_cluster_enabled = var.aks_private_cluster - private_dns_zone_id = var.aks_private_cluster && var.aks_cluster_private_dns_zone_id != "" ? var.aks_cluster_private_dns_zone_id : (var.aks_private_cluster ? "System" : null) + kubernetes_version = var.kubernetes_version + private_cluster_enabled = var.aks_private_cluster + private_dns_zone_id = var.aks_private_cluster && var.aks_cluster_private_dns_zone_id != "" ? var.aks_cluster_private_dns_zone_id : (var.aks_private_cluster ? "System" : null) network_profile { # Docs on AKS Advanced Networking config @@ -37,11 +36,17 @@ resource "azurerm_kubernetes_cluster" "aks" { service_cidr = var.aks_service_cidr dns_service_ip = var.aks_dns_service_ip pod_cidr = var.aks_network_plugin == "kubenet" ? 
var.aks_pod_cidr : null - docker_bridge_cidr = var.aks_docker_bridge_cidr outbound_type = var.cluster_egress_type load_balancer_sku = "standard" } + dynamic "api_server_access_profile" { + for_each = length(var.aks_cluster_endpoint_public_access_cidrs) > 0 ? [1] : [] + content { + authorized_ip_ranges = var.aks_cluster_endpoint_public_access_cidrs + } + } + dynamic "linux_profile" { for_each = var.aks_cluster_ssh_public_key == "" ? [] : [1] content { @@ -55,31 +60,29 @@ resource "azurerm_kubernetes_cluster" "aks" { dynamic "azure_active_directory_role_based_access_control" { for_each = var.rbac_aad_enabled ? [1] : [] content { - managed = true - tenant_id = var.rbac_aad_tenant_id - admin_group_object_ids = var.rbac_aad_admin_group_object_ids - azure_rbac_enabled = false + tenant_id = var.rbac_aad_tenant_id + admin_group_object_ids = var.rbac_aad_admin_group_object_ids + azure_rbac_enabled = false } } default_node_pool { - name = "system" - vm_size = var.aks_cluster_node_vm_size - zones = var.aks_availability_zones - enable_auto_scaling = var.aks_cluster_node_auto_scaling - enable_node_public_ip = false - node_labels = {} - node_taints = [] - fips_enabled = var.fips_enabled - enable_host_encryption = var.aks_cluster_enable_host_encryption - max_pods = var.aks_cluster_max_pods - os_disk_size_gb = var.aks_cluster_os_disk_size - max_count = var.aks_cluster_max_nodes - min_count = var.aks_cluster_min_nodes - node_count = var.aks_cluster_node_count - vnet_subnet_id = var.aks_vnet_subnet_id - tags = var.aks_cluster_tags - orchestrator_version = var.kubernetes_version + name = "system" + vm_size = var.aks_cluster_node_vm_size + zones = var.aks_availability_zones + auto_scaling_enabled = var.aks_cluster_node_auto_scaling + node_public_ip_enabled = false + node_labels = {} + fips_enabled = var.fips_enabled + host_encryption_enabled = var.aks_cluster_host_encryption_enabled + max_pods = var.aks_cluster_max_pods + os_disk_size_gb = var.aks_cluster_os_disk_size + max_count = 
var.aks_cluster_max_nodes + min_count = var.aks_cluster_min_nodes + node_count = var.aks_cluster_node_count + vnet_subnet_id = var.aks_vnet_subnet_id + tags = var.aks_cluster_tags + orchestrator_version = var.kubernetes_version } dynamic "service_principal" { diff --git a/modules/azure_aks/variables.tf b/modules/azure_aks/variables.tf index 2e8584d1..187d8745 100644 --- a/modules/azure_aks/variables.tf +++ b/modules/azure_aks/variables.tf @@ -131,7 +131,7 @@ variable "aks_cluster_max_pods" { default = 110 } -variable "aks_cluster_enable_host_encryption" { +variable "aks_cluster_host_encryption_enabled" { description = "Enables host encryption on all the nodes in the Default Node Pool" type = bool default = false @@ -186,18 +186,6 @@ variable "aks_dns_service_ip" { condition = var.aks_dns_service_ip != null ? can(regex("^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", var.aks_dns_service_ip)) : false error_message = "ERROR: aks_dns_service_ip - value must not be null and must be a valid IP address." } - -} - -variable "aks_docker_bridge_cidr" { - description = "IP address (in CIDR notation) used as the Docker bridge IP address on nodes. Changing this forces a new resource to be created." - type = string - default = "172.17.0.1/16" - validation { - condition = var.aks_docker_bridge_cidr != null ? can(cidrnetmask(var.aks_docker_bridge_cidr)) : false - error_message = "ERROR: aks_docker_bridge_cidr - value must not be null and must be valid CIDR." - } - } variable "aks_pod_cidr" { diff --git a/modules/azurerm_vm/main.tf b/modules/azurerm_vm/main.tf index e7a98e20..3b1f8fb8 100644 --- a/modules/azurerm_vm/main.tf +++ b/modules/azurerm_vm/main.tf @@ -8,16 +8,16 @@ resource "azurerm_public_ip" "vm_ip" { location = var.azure_rg_location resource_group_name = var.azure_rg_name allocation_method = var.enable_public_static_ip ? 
"Static" : "Dynamic" - sku = var.vm_zone == null ? "Basic" : "Standard" + sku = "Standard" zones = var.vm_zone == null ? [] : [var.vm_zone] tags = var.tags } resource "azurerm_network_interface" "vm_nic" { - name = "${var.name}-nic" - location = var.azure_rg_location - resource_group_name = var.azure_rg_name - enable_accelerated_networking = length(regexall("-nfs", var.name)) > 0 ? true : var.enable_accelerated_networking + name = "${var.name}-nic" + location = var.azure_rg_location + resource_group_name = var.azure_rg_name + accelerated_networking_enabled = length(regexall("-nfs", var.name)) > 0 ? true : var.accelerated_networking_enabled ip_configuration { name = "${var.name}-ip_config" diff --git a/modules/azurerm_vm/variables.tf b/modules/azurerm_vm/variables.tf index 02b221d1..01d751bf 100644 --- a/modules/azurerm_vm/variables.tf +++ b/modules/azurerm_vm/variables.tf @@ -133,7 +133,7 @@ variable "os_disk_caching" { default = "ReadOnly" } -variable "enable_accelerated_networking" { +variable "accelerated_networking_enabled" { description = "Enables network accelaration for VMs. By default enabled for the nfs and jump VMs. 
For any other VM the default is false" type = bool default = false diff --git a/modules/azurerm_vnet/main.tf b/modules/azurerm_vnet/main.tf index 7c1000a3..033e500c 100644 --- a/modules/azurerm_vnet/main.tf +++ b/modules/azurerm_vnet/main.tf @@ -41,7 +41,7 @@ resource "azurerm_subnet" "subnet" { virtual_network_name = local.vnet_name address_prefixes = each.value.prefixes service_endpoints = each.value.service_endpoints - private_endpoint_network_policies_enabled = each.value.private_endpoint_network_policies_enabled + private_endpoint_network_policies = each.value.private_endpoint_network_policies private_link_service_network_policies_enabled = each.value.private_link_service_network_policies_enabled dynamic "delegation" { for_each = each.value.service_delegations diff --git a/modules/azurerm_vnet/variables.tf b/modules/azurerm_vnet/variables.tf index 94f97116..74d2acf5 100644 --- a/modules/azurerm_vnet/variables.tf +++ b/modules/azurerm_vnet/variables.tf @@ -40,7 +40,7 @@ variable "subnets" { type = map(object({ prefixes = list(string) service_endpoints = list(string) - private_endpoint_network_policies_enabled = bool + private_endpoint_network_policies = string private_link_service_network_policies_enabled = bool service_delegations = map(object({ name = string diff --git a/monitor.tf b/monitor.tf index ce80f65b..2fbcb2da 100755 --- a/monitor.tf +++ b/monitor.tf @@ -50,18 +50,12 @@ resource "azurerm_monitor_diagnostic_setting" "audit" { target_resource_id = module.aks.cluster_id log_analytics_workspace_id = azurerm_log_analytics_workspace.viya4[0].id - dynamic "log" { + dynamic "enabled_log" { iterator = log_category for_each = var.resource_log_category content { category = log_category.value - enabled = true - - retention_policy { - enabled = true - days = var.log_retention_in_days - } } } @@ -72,11 +66,6 @@ resource "azurerm_monitor_diagnostic_setting" "audit" { content { category = metric_category.value enabled = true - - retention_policy { - enabled = true - 
days = var.log_retention_in_days - } } } } diff --git a/variables.tf b/variables.tf index b64a27f0..eee1f162 100644 --- a/variables.tf +++ b/variables.tf @@ -184,7 +184,7 @@ variable "default_nodepool_availability_zones" { default = ["1"] } -variable "aks_cluster_enable_host_encryption" { +variable "aks_cluster_host_encryption_enabled" { description = "Enables host encryption on all the nodes in the Node Pool." type = bool default = false @@ -231,18 +231,6 @@ variable "aks_dns_service_ip" { } } -variable "aks_docker_bridge_cidr" { - description = "IP address (in CIDR notation) used as the Docker bridge IP address on nodes. Changing this forces a new resource to be created." - type = string - default = "172.17.0.1/16" - - validation { - condition = var.aks_docker_bridge_cidr != null ? can(cidrnetmask(var.aks_docker_bridge_cidr)) : false - error_message = "ERROR: aks_docker_bridge_cidr - value must not be null and must be valid CIDR." - } - -} - variable "aks_pod_cidr" { description = "The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet. Changing this forces a new resource to be created." 
type = string @@ -739,7 +727,7 @@ variable "subnets" { type = map(object({ prefixes = list(string) service_endpoints = list(string) - private_endpoint_network_policies_enabled = bool + private_endpoint_network_policies = string private_link_service_network_policies_enabled = bool service_delegations = map(object({ name = string @@ -750,21 +738,21 @@ variable "subnets" { aks = { "prefixes" : ["192.168.0.0/23"], "service_endpoints" : ["Microsoft.Sql"], - "private_endpoint_network_policies_enabled" : true, + "private_endpoint_network_policies" : "Enabled", "private_link_service_network_policies_enabled" : false, "service_delegations" : {}, } misc = { "prefixes" : ["192.168.2.0/24"], "service_endpoints" : ["Microsoft.Sql"], - "private_endpoint_network_policies_enabled" : true, + "private_endpoint_network_policies" : "Enabled", "private_link_service_network_policies_enabled" : false, "service_delegations" : {}, } netapp = { "prefixes" : ["192.168.3.0/24"], "service_endpoints" : [], - "private_endpoint_network_policies_enabled" : false, + "private_endpoint_network_policies" : "Disabled", "private_link_service_network_policies_enabled" : false, "service_delegations" : { netapp = { diff --git a/versions.tf b/versions.tf index 2c62cbbe..ad65932c 100644 --- a/versions.tf +++ b/versions.tf @@ -3,16 +3,16 @@ terraform { - required_version = ">= 1.8.0" + required_version = ">= 1.9.0" required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.92.0" + version = "4.2.0" } azuread = { source = "hashicorp/azuread" - version = "~>2.47" + version = "~>2.53" } external = { source = "hashicorp/external" @@ -20,7 +20,7 @@ terraform { } local = { source = "hashicorp/local" - version = "~>2.4" + version = "~>2.5" } null = { source = "hashicorp/null" @@ -36,7 +36,7 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = "~>2.25" + version = "~>2.32" } } }