diff --git a/_data.tf b/_data.tf index 109a7d7..e69de29 100644 --- a/_data.tf +++ b/_data.tf @@ -1,6 +0,0 @@ -data "azurerm_subnet" "subnet" { - count = var.subnet_network_name == "" ? 1 : 0 - name = "subnet-k8s" - virtual_network_name = var.virtual_network_name == "" ? azurerm_virtual_network.vnet[0].name : var.virtual_network_name - resource_group_name = azurerm_resource_group.rg.name -} \ No newline at end of file diff --git a/_local.tf b/_local.tf index fcbdaa8..cba9ad5 100644 --- a/_local.tf +++ b/_local.tf @@ -1,3 +1,40 @@ locals { - subnet_id = length(data.azurerm_subnet.subnet) > 0 ? data.azurerm_subnet.subnet[0].id : null + default_agent_profile = { + name = var.default_node_pool.name + node_count = var.default_node_pool.node_count + vm_size = var.default_node_pool.vm_size + os_type = var.default_node_pool.os_type + workload_runtime = var.default_node_pool.workload_runtime + zones = var.default_node_pool.zones + enable_auto_scaling = var.default_node_pool.enable_auto_scaling + min_count = var.default_node_pool.min_count + max_count = var.default_node_pool.max_count + type = var.default_node_pool.type + node_labels = var.default_node_pool.node_labels + orchestrator_version = var.default_node_pool.orchestrator_version + priority = var.default_node_pool.priority + enable_host_encryption = var.default_node_pool.enable_host_encryption + eviction_policy = var.default_node_pool.eviction_policy + vnet_subnet_id = var.vnet_subnet_id + max_pods = var.default_node_pool.max_pods + os_disk_type = var.default_node_pool.os_disk_type + os_disk_size_gb = var.default_node_pool.os_disk_size_gb + enable_node_public_ip = var.default_node_pool.enable_node_public_ip + scale_down_mode = var.default_node_pool.scale_down_mode + } + + default_node_pool = merge(local.default_agent_profile, var.default_node_pool) + + private_dns_zone = var.private_dns_zone_type == "Custom" ? 
var.private_dns_zone_id : var.private_dns_zone_type + + default_no_proxy_url_list = [ + var.vnet_address_space, + var.aks_pod_cidr, + var.service_cidr, + "localhost", + "konnectivity", + "127.0.0.1", # Localhost + "168.63.129.16", # Azure platform global VIP (https://learn.microsoft.com/en-us/azure/virtual-network/what-is-ip-address-168-63-129-16) + "169.254.169.254", # Azure Instance Metadata Service (IMDS) + ] } diff --git a/_output.tf b/_output.tf index 6db19cf..70be9da 100644 --- a/_output.tf +++ b/_output.tf @@ -6,6 +6,11 @@ output "kube_config_file" { output "cluster_name" { description = "Cluster name to be used in the context of kubectl" - value = azurerm_kubernetes_cluster.cluster.name + value = azurerm_kubernetes_cluster.aks_cluster.name +} + +output "cluster_id" { + description = "Describe the Cluster ID" + value = azurerm_kubernetes_cluster.aks_cluster.id } diff --git a/_variable.tf b/_variable.tf index c44fa62..1c272b4 100644 --- a/_variable.tf +++ b/_variable.tf @@ -1,7 +1,6 @@ variable "resource_group" { description = "(Required) Specifies the Resource Group where the Managed Kubernetes Cluster should exist." type = string - default = "Resource_group" } variable "location" { @@ -13,55 +12,21 @@ variable "location" { variable "prefix" { description = "(Required) Base name used by resources (cluster name, main service and others)." type = string - default = "new_kubernetes" + default = "SpecialChem_DevK8s" } variable "k8s_version" { description = "(Optional) Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade)." type = string - default = "1.26" + default = "1.28.5" } -variable "vm_size" { - description = "(Required) The size of the Virtual Machine, such as Standard_DS2_v2." 
- type = string - default = "Standard_DS2_v2" -} - -variable "auto_scaling_default_node" { - description = "(Optional) Kubernetes Auto Scaler must be enabled for this main pool" - type = bool - default = true -} - -variable "availability_zones" { - description = "(Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created." - type = list(string) - default = ["1", "2", "3"] +variable "vnet_subnet_id" { + type = string } -variable "node_count" { - description = "(Optional) The initial number of nodes which should exist in this Node Pool. If specified this must be between 1 and 100 and between min_count and max_count." - type = string - default = 1 -} - -variable "node_min_count" { - description = "(Required) The minimum number of nodes which should exist in this Node Pool. If specified this must be between 1 and 100." - type = number - default = 1 -} - -variable "node_max_count" { - description = "(Required) The maximum number of nodes which should exist in this Node Pool. If specified this must be between 1 and 100." - type = number - default = 10 -} - -variable "max_pods" { - description = "(Optional) The maximum number of pods that can run on each agent." - type = number - default = 50 +variable "vnet_address_space" { + type = string } variable "network_plugin" { @@ -70,49 +35,30 @@ variable "network_plugin" { } variable "service_cidr" { - description = "(Optional) The Network Range used by the Kubernetes service.Changing this forces a new resource to be created." - type = string - default = "10.0.0.0/16" + type = string } -variable "dns_service_ip" { - description = "(Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns)." +variable "support_plan" { type = string - default = "10.0.0.10" + default = "KubernetesOfficial" + description = "The support plan which should be used for this Kubernetes Cluster. 
Possible values are `KubernetesOfficial` and `AKSLongTermSupport`." } -variable "pod_cidr" { - description = "(Optional) The CIDR to use for pod IP addresses." +variable "automatic_channel_upgrade" { type = string - default = "10.244.0.0/16" + default = null + description = "(Optional) The upgrade channel for this Kubernetes Cluster. Possible values are `patch`, `rapid`, `node-image` and `stable`. By default automatic-upgrades are turned off." } +variable "azure_policy_enabled" { + type = bool + default = false + description = "Enable Azure Policy Addon." +} variable "dns_prefix" { type = string - default = "k8stest" + default = "specialDevk8s" } - -variable "virtual_network_name" { - description = "Virtual Network name" - default = "vnet-k8" -} - -variable "subnet_network_name" { - description = "Subnet netwotk name" - default = "subnet-k8s" -} - -variable "virtual_network_address" { - description = "Virtual network address" - default = "10.0.0.0/8" -} - -variable "subnet_address" { - description = "Subnet address" - default = "10.0.1.0/16" - -} - variable "sku_tier" { description = "(Optional) The SKU Tier that should be used for this Kubernetes Cluster. Possible values are Free and Paid (which includes the Uptime SLA). Defaults to Free." 
default = "Free" @@ -132,15 +78,14 @@ variable "tags" { variable "name_prefix" { description = "Used in tags cluster and nodes" type = string - default = "vnet" + default = "AKS" } variable "default_tags" { type = map(string) description = "A map to add common tags to all the resources" default = { - "Scope" : "VNET" - "CreatedBy" : "Terraform" + "CreatedBy" : "TTN" } } @@ -148,7 +93,7 @@ variable "common_tags" { type = map(string) description = "A map to add common tags to all the resources" default = { - Project = "VNet" + Project = "SpecialChem" Managed-By = "TTN" } } @@ -188,20 +133,203 @@ variable "create_additional_node_pool" { default = false } -variable "ingress_application_gateway" { - description = "Specifies the Application Gateway Ingress Controller addon configuration." +variable "oms_log_analytics_workspace_id" { + type = string + default = "" +} + +variable "load_balancer_profile_enabled" { + type = bool + default = false + description = "(Optional) Enable a load_balancer_profile block. This can only be used when load_balancer_sku is set to `standard`." + nullable = false +} + +variable "load_balancer_sku" { + type = string + default = "standard" + description = "(Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created." + + validation { + condition = contains(["basic", "standard"], var.load_balancer_sku) + error_message = "Possible values are `basic` and `standard`" + } +} + +variable "load_balancer_profile_idle_timeout_in_minutes" { + type = number + default = 30 + description = "(Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive." 
+} + +variable "load_balancer_profile_managed_outbound_ip_count" { + type = number + default = null + description = "(Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive" +} + +variable "load_balancer_profile_outbound_ip_prefix_ids" { + type = set(string) + default = null + description = "(Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer." +} + +variable "load_balancer_profile_outbound_ip_address_ids" { + type = set(string) + default = null + description = "(Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer." +} + +variable "load_balancer_profile_managed_outbound_ipv6_count" { + type = number + default = null + description = "(Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed_outbound_ipv6_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature" +} + +variable "load_balancer_profile_outbound_ports_allocated" { + type = number + default = 0 + description = "(Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`" +} +variable "outbound_type" { + description = "The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are `loadBalancer` and `userDefinedRouting`." 
+ type = string + default = "loadBalancer" +} + +variable "aks_pod_cidr" { + description = "CIDR used by pods when network plugin is set to `kubenet`." + type = string + default = "10.41.22.0/22" +} + +variable "default_node_pool" { + description = "Default node pool configuration" type = object({ - enabled = bool - gateway_id = string - gateway_name = string - subnet_cidr = string - subnet_id = string + name = optional(string, "default") + node_count = optional(number, 1) + vm_size = optional(string, "Standard_D2_v3") + os_type = optional(string, "Linux") + workload_runtime = optional(string, null) + zones = optional(list(number), [1, 2]) + enable_auto_scaling = optional(bool, false) + min_count = optional(number, 1) + max_count = optional(number, 10) + type = optional(string, "VirtualMachineScaleSets") + node_labels = optional(map(any), null) + orchestrator_version = optional(string, null) + priority = optional(string, null) + enable_host_encryption = optional(bool, null) + eviction_policy = optional(string, null) + max_pods = optional(number, 30) + os_disk_type = optional(string, "Managed") + os_disk_size_gb = optional(number, 128) + enable_node_public_ip = optional(bool, false) + scale_down_mode = optional(string, "Delete") }) - default = { - enabled = false - gateway_id = null - gateway_name = null - subnet_cidr = null - subnet_id = null - } + default = {} +} + +variable "auto_scaler_profile" { + description = "Configuration of `auto_scaler_profile` block object" + type = object({ + balance_similar_node_groups = optional(bool, false) + expander = optional(string, "random") + max_graceful_termination_sec = optional(number, 600) + max_node_provisioning_time = optional(string, "15m") + max_unready_nodes = optional(number, 3) + max_unready_percentage = optional(number, 45) + new_pod_scale_up_delay = optional(string, "10s") + scale_down_delay_after_add = optional(string, "10m") + scale_down_delay_after_delete = optional(string, "10s") + 
scale_down_delay_after_failure = optional(string, "3m") + scan_interval = optional(string, "10s") + scale_down_unneeded = optional(string, "10m") + scale_down_unready = optional(string, "20m") + scale_down_utilization_threshold = optional(number, 0.5) + empty_bulk_delete_max = optional(number, 10) + skip_nodes_with_local_storage = optional(bool, true) + skip_nodes_with_system_pods = optional(bool, true) + }) + default = null +} + +variable "key_vault_secrets_provider" { + description = "Enable AKS built-in Key Vault secrets provider. If enabled, an identity is created by the AKS itself and exported from this module." + type = object({ + secret_rotation_enabled = optional(bool, false) + secret_rotation_interval = optional(string, "") + }) + default = null +} + +variable "private_cluster_enabled" { + description = "Configure AKS as a Private Cluster." + type = bool + default = true +} + +variable "private_dns_zone_type" { + type = string + default = "System" + description = <<EOD +Set AKS private dns zone if needed and if private cluster is enabled (privatelink.<region>.azmk8s.io) +- "Custom" : You will have to deploy private dns zone on your own and pass private_dns_zone_id with private_dns_zone_type = "Custom" variable +If this setting is used, aks user assigned identity will be "userassigned" instead of "systemassigned" +and the aks user must have "Private DNS Zone Contributor" role on the private DNS Zone +- "System" : AKS will manage the private zone and create it in the same resource group as the Node Resource Group +- "None" : In case of None you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. +EOD +} + +variable "private_dns_zone_id" { + type = string + default = null + description = "Id of the private DNS Zone when is custom" +} + +variable "node_resource_group" { + description = "Name of the resource group in which to put AKS nodes. If null default to MC_" + type = string + default = null +} + +variable "oidc_issuer_enabled" { + description = "Whether to enable OpenID Connect issuer or not." 
+ type = bool + default = false +} + +variable "http_application_routing_enabled" { + description = "Whether HTTP Application Routing is enabled." + type = bool + default = false +} + +variable "aks_http_proxy_settings" { + description = "AKS HTTP proxy settings. URLs must be in format `http(s)://fqdn:port/`. When setting the `no_proxy_url_list` parameter, the AKS Private Endpoint domain name and the AKS VNet CIDR must be added to the URLs list." + type = object({ + http_proxy_url = optional(string) + https_proxy_url = optional(string) + no_proxy_url_list = optional(list(string), []) + trusted_ca = optional(string) + }) + default = null +} + +variable "local_account_disabled" { + type = bool + default = null + description = "(Optional) - If `true` local accounts will be disabled. Defaults to `false`." +} + +variable "node_os_channel_upgrade" { + type = string + default = null + description = " (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`." +} + +variable "open_service_mesh_enabled" { + type = bool + default = null + description = "Is Open Service Mesh enabled?" 
} \ No newline at end of file diff --git a/example/complete/main.tf b/example/complete/main.tf index b076c92..ec96de9 100644 --- a/example/complete/main.tf +++ b/example/complete/main.tf @@ -1,29 +1,40 @@ -module "aks_main" { - - source = "git::https://https://github.com/tothenew/terraform-azure-aks.git?ref=aks-v1" - resource_group = "RG_for_AKS" - location = "eastus2" - vm_size = "Standard_DS2_v2" - virtual_network_address = "10.0.0.0/8" - subnet_address = "10.0.1.0/16" +provider "azurerm" { + features {} +} - create_additional_node_pool = true +resource "azurerm_resource_group" "rg" { + name = "aks-test-demo" + location = "Central India" +} - # if create_additional_node_pool = true then Add node pool configurations +module "vnet" { + source = "git::https://github.com/tothenew/terraform-azure-vnet.git" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + address_space = "10.41.0.0/20" - additional_node_pools = { + virtual_network_peering = false - "qa" = { - vm_size = "Standard_DS2_v2" - os_disk_size_gb = 52 - enable_auto_scaling = true - availability_zones = [] - node_count = 1 - min_count = 1 - max_count = 10 - max_pods = 110 - node_labels = {} - taints = [] + subnets = { + "aks_subnet" = { + address_prefixes = ["10.41.1.0/24"] + associate_with_route_table = false + is_natgateway = false + is_nsg = true + service_delegation = false } } } + +module "aks_main" { + + source = "../.." 
+ resource_group = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + vnet_subnet_id = module.vnet.subnet_ids["aks_subnet"] + service_cidr = "10.41.16.0/22" + vnet_address_space = "10.41.0.0/20" + aks_pod_cidr = "10.41.22.0/22" + + create_additional_node_pool = false +} diff --git a/main.tf b/main.tf index 7381ef6..06bdf33 100644 --- a/main.tf +++ b/main.tf @@ -1,67 +1,128 @@ -################################################################################################## -#### Azure Resource Group #### -################################################################################################## - -resource "azurerm_resource_group" "rg" { - name = var.resource_group - location = var.location -} - -################################################################################################## -#### Azure Kubernetes Cluster #### -################################################################################################## - -resource "azurerm_kubernetes_cluster" "cluster" { - name = var.prefix - resource_group_name = azurerm_resource_group.rg.name - location = azurerm_resource_group.rg.location - dns_prefix = var.dns_prefix - kubernetes_version = var.k8s_version - sku_tier = var.sku_tier +resource "azurerm_kubernetes_cluster" "aks_cluster" { + name = var.prefix + resource_group_name = var.resource_group + location = var.location + dns_prefix = var.dns_prefix + kubernetes_version = var.k8s_version + sku_tier = var.sku_tier + support_plan = var.support_plan + local_account_disabled = var.local_account_disabled + node_os_channel_upgrade = var.node_os_channel_upgrade + open_service_mesh_enabled = var.open_service_mesh_enabled + automatic_channel_upgrade = var.automatic_channel_upgrade + azure_policy_enabled = var.azure_policy_enabled + private_cluster_enabled = var.private_cluster_enabled + private_dns_zone_id = var.private_cluster_enabled ? 
local.private_dns_zone : null + node_resource_group = var.node_resource_group + oidc_issuer_enabled = var.oidc_issuer_enabled + http_application_routing_enabled = var.http_application_routing_enabled default_node_pool { - name = "dev" - vm_size = var.vm_size - enable_auto_scaling = var.auto_scaling_default_node - node_count = var.node_count - min_count = var.node_min_count - max_count = var.node_max_count - max_pods = var.max_pods + name = local.default_node_pool.name + vm_size = local.default_node_pool.vm_size + zones = local.default_node_pool.zones + enable_auto_scaling = local.default_node_pool.enable_auto_scaling + node_count = local.default_node_pool.enable_auto_scaling ? null : local.default_node_pool.node_count + min_count = local.default_node_pool.enable_auto_scaling ? local.default_node_pool.min_count : null + max_count = local.default_node_pool.enable_auto_scaling ? local.default_node_pool.max_count : null + max_pods = local.default_node_pool.max_pods + os_disk_type = local.default_node_pool.os_disk_type + os_disk_size_gb = local.default_node_pool.os_disk_size_gb + type = local.default_node_pool.type + vnet_subnet_id = local.default_node_pool.vnet_subnet_id + node_labels = local.default_node_pool.node_labels + scale_down_mode = local.default_node_pool.scale_down_mode + + tags = merge(var.default_tags, var.common_tags, tomap({ + "Name" : "${var.name_prefix}", + "Environment" : "Dev" + })) } + dynamic "auto_scaler_profile" { + for_each = var.auto_scaler_profile != null ? 
[var.auto_scaler_profile] : [] + content { + balance_similar_node_groups = try(auto_scaler_profile.value.balance_similar_node_groups, null) + expander = try(auto_scaler_profile.value.expander, null) + max_graceful_termination_sec = try(auto_scaler_profile.value.max_graceful_termination_sec, null) + max_node_provisioning_time = try(auto_scaler_profile.value.max_node_provisioning_time, null) + max_unready_nodes = try(auto_scaler_profile.value.max_unready_nodes, null) + max_unready_percentage = try(auto_scaler_profile.value.max_unready_percentage, null) + new_pod_scale_up_delay = try(auto_scaler_profile.value.new_pod_scale_up_delay, null) + scale_down_delay_after_add = try(auto_scaler_profile.value.scale_down_delay_after_add, null) + scale_down_delay_after_delete = try(auto_scaler_profile.value.scale_down_delay_after_delete, null) + scale_down_delay_after_failure = try(auto_scaler_profile.value.scale_down_delay_after_failure, null) + scan_interval = try(auto_scaler_profile.value.scan_interval, null) + scale_down_unneeded = try(auto_scaler_profile.value.scale_down_unneeded, null) + scale_down_unready = try(auto_scaler_profile.value.scale_down_unready, null) + scale_down_utilization_threshold = try(auto_scaler_profile.value.scale_down_utilization_threshold, null) + empty_bulk_delete_max = try(auto_scaler_profile.value.empty_bulk_delete_max, null) + skip_nodes_with_local_storage = try(auto_scaler_profile.value.skip_nodes_with_local_storage, null) + skip_nodes_with_system_pods = try(auto_scaler_profile.value.skip_nodes_with_system_pods, null) + } + } identity { type = "SystemAssigned" } - dynamic "network_profile" { - for_each = var.network_plugin == "kubenet" ? [1] : [] + # oms_agent { + # log_analytics_workspace_id = var.oms_log_analytics_workspace_id != "" ? var.oms_log_analytics_workspace_id : data.azurerm_log_analytics_workspace.example.workspace_id + # } + + dynamic "oms_agent" { + for_each = var.oms_log_analytics_workspace_id != "" ? 
[1] : [] content { - network_plugin = var.network_plugin + log_analytics_workspace_id = var.oms_log_analytics_workspace_id } } - dynamic "network_profile" { - for_each = var.network_plugin == "azure" ? [1] : [] - content { - network_plugin = var.network_plugin - network_plugin_mode = "Overlay" - dns_service_ip = var.dns_service_ip - pod_cidr = var.pod_cidr - service_cidr = var.service_cidr + network_profile { + network_plugin = var.network_plugin + network_policy = var.network_plugin == "azure" ? "azure" : null + network_mode = var.network_plugin == "azure" ? "transparent" : null + dns_service_ip = cidrhost(var.service_cidr, 10) + service_cidr = var.service_cidr + load_balancer_sku = var.load_balancer_sku + outbound_type = var.outbound_type + pod_cidr = var.network_plugin == "kubenet" ? var.aks_pod_cidr : null + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [ + "load_balancer_profile" + ] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } } } - dynamic "ingress_application_gateway" { - for_each = try(var.ingress_application_gateway.gateway_id, null) == null ? [] : [1] + dynamic "key_vault_secrets_provider" { + for_each = var.key_vault_secrets_provider[*] + content { + secret_rotation_enabled = key_vault_secrets_provider.value.secret_rotation_enabled + secret_rotation_interval = key_vault_secrets_provider.value.secret_rotation_interval + } + } + dynamic "http_proxy_config" { + for_each = var.aks_http_proxy_settings != null ? 
["enabled"] : [] content { - gateway_id = var.ingress_application_gateway.gateway_id - subnet_cidr = var.ingress_application_gateway.subnet_cidr - subnet_id = var.ingress_application_gateway.subnet_id + http_proxy = var.aks_http_proxy_settings.http_proxy_url + https_proxy = var.aks_http_proxy_settings.https_proxy_url + no_proxy = distinct(flatten(concat(local.default_no_proxy_url_list, var.aks_http_proxy_settings.no_proxy_url_list))) + trusted_ca = var.aks_http_proxy_settings.trusted_ca } } - tags = var.tags + tags = merge(var.default_tags, var.common_tags, tomap({ + "Name" : "${var.name_prefix}", + "Environment" : "Dev" + })) } @@ -70,9 +131,9 @@ resource "azurerm_kubernetes_cluster" "cluster" { ################################################################################################## resource "local_file" "kubeconfig" { - depends_on = [azurerm_kubernetes_cluster.cluster] + depends_on = [azurerm_kubernetes_cluster.aks_cluster] filename = "kubeconfig" - content = azurerm_kubernetes_cluster.cluster.kube_config_raw + content = azurerm_kubernetes_cluster.aks_cluster.kube_config_raw } ################################################################################################## @@ -83,7 +144,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "aks" { for_each = var.create_additional_node_pool ? 
var.additional_node_pools : {} - kubernetes_cluster_id = azurerm_kubernetes_cluster.cluster.id + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks_cluster.id name = substr(each.key, 0, 12) vm_size = each.value.vm_size @@ -95,37 +156,10 @@ resource "azurerm_kubernetes_cluster_node_pool" "aks" { max_pods = each.value.max_pods node_labels = each.value.node_labels node_taints = each.value.taints + vnet_subnet_id = var.vnet_subnet_id tags = merge(var.default_tags, var.common_tags, { "Name" = "${var.name_prefix}-repo", + "Environment" : "Dev" }) } - -################################################################################################## -#### Azure Virtual Network #### -################################################################################################## - -resource "azurerm_virtual_network" "vnet" { - name = "vnet-k8s" - address_space = [var.virtual_network_address] - location = azurerm_resource_group.rg.location - resource_group_name = azurerm_resource_group.rg.name - count = var.virtual_network_name == "" ? 1 : 0 - tags = merge(var.default_tags, var.common_tags, { - "Name" = "${var.name_prefix}-repo", - }) -} - -################################################################################################## -#### Azure Subnet #### -################################################################################################## - -resource "azurerm_subnet" "subnet" { - count = var.subnet_network_name == "" ? 1 : 0 - name = "subnet-k8s" - resource_group_name = azurerm_resource_group.rg.name - virtual_network_name = var.virtual_network_name == "" ? azurerm_virtual_network.vnet[0].name : var.virtual_network_name - address_prefixes = var.virtual_network_name == "" ? ["10.240.0.0/16"] : [var.subnet_address] -} - -