Skip to content

Commit

Permalink
BREAKING CHANGE: (PSKD-348) Update tool versions to latest
Browse files Browse the repository at this point in the history
  • Loading branch information
riragh committed Sep 20, 2024
1 parent dcf9250 commit b199040
Show file tree
Hide file tree
Showing 18 changed files with 92 additions and 140 deletions.
4 changes: 2 additions & 2 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
ARG TERRAFORM_VERSION=1.8.2
ARG AZURECLI_VERSION=2.59.0
ARG TERRAFORM_VERSION=1.9.6
ARG AZURECLI_VERSION=2.64.0

FROM hashicorp/terraform:$TERRAFORM_VERSION as terraform
FROM mcr.microsoft.com/azure-cli:$AZURECLI_VERSION
Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -57,10 +57,10 @@ This project supports two options for running Terraform scripts:
Access to an **Azure Subscription** and an [**Identity**](./docs/user/TerraformAzureAuthentication.md) with the *Contributor* role are required.

#### Terraform Requirements:
- [Terraform](https://www.terraform.io/downloads.html) - v1.8.2
- [Terraform](https://www.terraform.io/downloads.html) - v1.9.6
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) - v1.29.7
- [jq](https://stedolan.github.io/jq/) - v1.6
- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure) - (optional - useful as an alternative to the Azure Portal) - v2.59.0
- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure) - (optional - useful as an alternative to the Azure Portal) - v2.64.0

#### Docker Requirements:
- [Docker](https://docs.docker.com/get-docker/)
Expand Down
4 changes: 2 additions & 2 deletions container-structure-test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ commandTests:
- name: "terraform version"
command: "terraform"
args: ["--version"]
expectedOutput: ["Terraform v1.8.2"]
expectedOutput: ["Terraform v1.9.6"]
- name: "python version"
command: "python3"
args: ["--version"]
Expand All @@ -29,7 +29,7 @@ commandTests:
- -c
- |
az version -o tsv
expectedOutput: ["2.59.0\t2.59.0\t1.1.0"]
expectedOutput: ["2.64.0\t2.64.0\t1.1.0"]

metadataTest:
workdir: "/viya4-iac-azure"
Expand Down
6 changes: 3 additions & 3 deletions docs/CONFIG-VARS.md
Original file line number Diff line number Diff line change
Expand Up @@ -127,22 +127,22 @@ The default values for the `subnets` variable are as follows:
aks = {
"prefixes": ["192.168.0.0/23"],
"service_endpoints": ["Microsoft.Sql"],
"private_endpoint_network_policies_enabled": false,
"private_endpoint_network_policies": "Disabled",
"private_link_service_network_policies_enabled": false,
"service_delegations": {},
}
misc = {
"prefixes": ["192.168.2.0/24"],
"service_endpoints": ["Microsoft.Sql"],
"private_endpoint_network_policies_enabled": false,
"private_endpoint_network_policies": "Disabled",
"private_link_service_network_policies_enabled": false,
"service_delegations": {},
}
## If using ha storage then the following is also added
netapp = {
"prefixes": ["192.168.3.0/24"],
"service_endpoints": [],
"private_endpoint_network_policies_enabled": false,
"private_endpoint_network_policies": "Disabled",
"private_link_service_network_policies_enabled": false,
"service_delegations": {
netapp = {
Expand Down
8 changes: 4 additions & 4 deletions examples/sample-input-postgres.tfvars
Original file line number Diff line number Diff line change
Expand Up @@ -43,21 +43,21 @@ postgres_servers = {
# aks = {
# "prefixes" : ["192.168.0.0/23"],
# "service_endpoints" : ["Microsoft.Sql"],
# "private_endpoint_network_policies_enabled" : true,
# "private_endpoint_network_policies" : "Enabled",
# "private_link_service_network_policies_enabled" : false,
# "service_delegations" : {},
# }
# misc = {
# "prefixes" : ["192.168.2.0/24"],
# "service_endpoints" : ["Microsoft.Sql"],
# "private_endpoint_network_policies_enabled" : true,
# "private_endpoint_network_policies" : "Enabled",
# "private_link_service_network_policies_enabled" : false,
# "service_delegations" : {},
# }
# netapp = {
# "prefixes" : ["192.168.3.0/24"],
# "service_endpoints" : [],
# "private_endpoint_network_policies_enabled" : false,
# "private_endpoint_network_policies" : "Disabled",
# "private_link_service_network_policies_enabled" : false,
# "service_delegations" : {
# netapp = {
Expand All @@ -69,7 +69,7 @@ postgres_servers = {
# postgresql = {
# "prefixes": ["192.168.4.0/24"],
# "service_endpoints": ["Microsoft.Sql"],
# "private_endpoint_network_policies_enabled": true,
# "private_endpoint_network_policies": "Enabled",
# "private_link_service_network_policies_enabled": false,
# "service_delegations": {
# flexpostgres = {
Expand Down
6 changes: 3 additions & 3 deletions examples/sample-input-singlestore.tfvars
Original file line number Diff line number Diff line change
Expand Up @@ -133,22 +133,22 @@ subnets = {
aks = {
"prefixes": ["192.168.0.0/21"],
"service_endpoints": ["Microsoft.Sql"],
"private_endpoint_network_policies_enabled": false,
"private_endpoint_network_policies": "Disabled",
"private_link_service_network_policies_enabled": false,
"service_delegations": {},
}
misc = {
"prefixes": ["192.168.8.0/24"],
"service_endpoints": ["Microsoft.Sql"],
"private_endpoint_network_policies_enabled": false,
"private_endpoint_network_policies": "Disabled",
"private_link_service_network_policies_enabled": false,
"service_delegations": {},
}
## If using ha storage then the following is also added
netapp = {
"prefixes": ["192.168.9.0/24"],
"service_endpoints": [],
"private_endpoint_network_policies_enabled": false,
"private_endpoint_network_policies": "Disabled",
"private_link_service_network_policies_enabled": false,
"service_delegations": {
netapp = {
Expand Down
7 changes: 3 additions & 4 deletions main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ module "aks" {
aks_cluster_max_pods = var.default_nodepool_max_pods
aks_cluster_os_disk_size = var.default_nodepool_os_disk_size
aks_cluster_node_vm_size = var.default_nodepool_vm_type
aks_cluster_enable_host_encryption = var.aks_cluster_enable_host_encryption
aks_cluster_host_encryption_enabled = var.aks_cluster_host_encryption_enabled
aks_node_disk_encryption_set_id = var.aks_node_disk_encryption_set_id
aks_cluster_node_admin = var.node_vm_admin
aks_cluster_ssh_public_key = try(file(var.ssh_public_key), "")
Expand All @@ -159,7 +159,6 @@ module "aks" {
aks_network_policy = var.aks_network_policy
aks_network_plugin_mode = var.aks_network_plugin_mode
aks_dns_service_ip = var.aks_dns_service_ip
aks_docker_bridge_cidr = var.aks_docker_bridge_cidr
cluster_egress_type = local.cluster_egress_type
aks_pod_cidr = var.aks_pod_cidr
aks_service_cidr = var.aks_service_cidr
Expand Down Expand Up @@ -202,7 +201,7 @@ module "node_pools" {
os_disk_size = each.value.os_disk_size
# TODO: enable with azurerm v2.37.0
# os_disk_type = each.value.os_disk_type
enable_auto_scaling = each.value.min_nodes == each.value.max_nodes ? false : true
auto_scaling_enabled = each.value.min_nodes == each.value.max_nodes ? false : true
node_count = each.value.min_nodes
min_nodes = each.value.min_nodes == each.value.max_nodes ? null : each.value.min_nodes
max_nodes = each.value.min_nodes == each.value.max_nodes ? null : each.value.max_nodes
Expand All @@ -212,7 +211,7 @@ module "node_pools" {
zones = (var.node_pools_availability_zone == "" || var.node_pools_proximity_placement == true) ? [] : (var.node_pools_availability_zones != null) ? var.node_pools_availability_zones : [var.node_pools_availability_zone]
proximity_placement_group_id = element(coalescelist(azurerm_proximity_placement_group.proximity[*].id, [""]), 0)
orchestrator_version = var.kubernetes_version
enable_host_encryption = var.aks_cluster_enable_host_encryption
host_encryption_enabled = var.aks_cluster_host_encryption_enabled
tags = var.tags
}

Expand Down
33 changes: 16 additions & 17 deletions modules/aks_node_pool/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -4,51 +4,50 @@
# Reference: https://www.terraform.io/docs/providers/azurerm/r/kubernetes_cluster_node_pool.html

resource "azurerm_kubernetes_cluster_node_pool" "autoscale_node_pool" {
count = var.enable_auto_scaling ? 1 : 0
count = var.auto_scaling_enabled ? 1 : 0
name = var.node_pool_name
kubernetes_cluster_id = var.aks_cluster_id
vnet_subnet_id = var.vnet_subnet_id
zones = var.zones
fips_enabled = var.fips_enabled
enable_host_encryption = var.enable_host_encryption
host_encryption_enabled = var.host_encryption_enabled
proximity_placement_group_id = var.proximity_placement_group_id == "" ? null : var.proximity_placement_group_id
vm_size = var.machine_type
os_disk_size_gb = var.os_disk_size
# TODO: enable after azurerm v2.37.0
# os_disk_type = var.os_disk_type
os_type = var.os_type
enable_auto_scaling = var.enable_auto_scaling
# Still in preview, revisit if needed later - https://docs.microsoft.com/en-us/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools-preview
# enable_node_public_ip = var.enable_node_public_ip
node_count = var.node_count
max_count = var.max_nodes
min_count = var.min_nodes
max_pods = var.max_pods
node_labels = var.node_labels
node_taints = var.node_taints
orchestrator_version = var.orchestrator_version
tags = var.tags
os_type = var.os_type
auto_scaling_enabled = var.auto_scaling_enabled
node_public_ip_enabled = var.node_public_ip_enabled
node_count = var.node_count
max_count = var.max_nodes
min_count = var.min_nodes
max_pods = var.max_pods
node_labels = var.node_labels
node_taints = var.node_taints
orchestrator_version = var.orchestrator_version
tags = var.tags

lifecycle {
ignore_changes = [node_count]
}
}

resource "azurerm_kubernetes_cluster_node_pool" "static_node_pool" {
count = var.enable_auto_scaling ? 0 : 1
count = var.auto_scaling_enabled ? 0 : 1
name = var.node_pool_name
kubernetes_cluster_id = var.aks_cluster_id
vnet_subnet_id = var.vnet_subnet_id
zones = var.zones
fips_enabled = var.fips_enabled
enable_host_encryption = var.enable_host_encryption
host_encryption_enabled = var.host_encryption_enabled
proximity_placement_group_id = var.proximity_placement_group_id == "" ? null : var.proximity_placement_group_id
vm_size = var.machine_type
os_disk_size_gb = var.os_disk_size
# TODO: enable after azurerm v2.37.0
# os_disk_type = var.os_disk_type
os_type = var.os_type
enable_auto_scaling = var.enable_auto_scaling
auto_scaling_enabled = var.auto_scaling_enabled
node_count = var.node_count
max_count = var.max_nodes
min_count = var.min_nodes
Expand Down
30 changes: 8 additions & 22 deletions modules/aks_node_pool/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ variable "fips_enabled" {
default = false
}

variable "enable_host_encryption" {
variable "host_encryption_enabled" {
description = "Enables host encryption on all the nodes in the Node Pool. Changing this forces a new resource to be created."
type = bool
default = false
Expand Down Expand Up @@ -65,12 +65,18 @@ variable "node_count" {
default = 1
}

variable "enable_auto_scaling" {
variable "auto_scaling_enabled" {
description = "Whether to enable auto-scaler."
type = bool
default = false
}

variable "node_public_ip_enabled" {
description = "Should nodes in this Node Pool have a Public IP Address"
type = bool
default = false
}

variable "max_pods" {
description = "The maximum number of pods that can run on each agent. Changing this forces a new resource to be created."
type = number
Expand Down Expand Up @@ -116,23 +122,3 @@ variable "proximity_placement_group_id" {
type = string
default = ""
}

# For future - https://docs.microsoft.com/en-us/azure/aks/spot-node-pool
#
# variable "priority" {
# description = "The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created."
# type = string
# default = "Regular"
# }

# variable "eviction_policy" {
# description = "The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. An Eviction Policy can only be configured when priority is set to Spot"
# type = string
# default = null
# }

# variable "spot_max_price" {
# description = "The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created."
# type = number
# default = -1
# }
55 changes: 29 additions & 26 deletions modules/azure_aks/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,9 @@ resource "azurerm_kubernetes_cluster" "aks" {

# https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions
# az aks get-versions --location eastus -o table
kubernetes_version = var.kubernetes_version
api_server_authorized_ip_ranges = var.aks_cluster_endpoint_public_access_cidrs
private_cluster_enabled = var.aks_private_cluster
private_dns_zone_id = var.aks_private_cluster && var.aks_cluster_private_dns_zone_id != "" ? var.aks_cluster_private_dns_zone_id : (var.aks_private_cluster ? "System" : null)
kubernetes_version = var.kubernetes_version
private_cluster_enabled = var.aks_private_cluster
private_dns_zone_id = var.aks_private_cluster && var.aks_cluster_private_dns_zone_id != "" ? var.aks_cluster_private_dns_zone_id : (var.aks_private_cluster ? "System" : null)

network_profile {
# Docs on AKS Advanced Networking config
Expand All @@ -37,11 +36,17 @@ resource "azurerm_kubernetes_cluster" "aks" {
service_cidr = var.aks_service_cidr
dns_service_ip = var.aks_dns_service_ip
pod_cidr = var.aks_network_plugin == "kubenet" ? var.aks_pod_cidr : null
docker_bridge_cidr = var.aks_docker_bridge_cidr
outbound_type = var.cluster_egress_type
load_balancer_sku = "standard"
}

dynamic "api_server_access_profile" {
for_each = length(var.aks_cluster_endpoint_public_access_cidrs) > 0 ? [1] : []
content {
authorized_ip_ranges = var.aks_cluster_endpoint_public_access_cidrs
}
}

dynamic "linux_profile" {
for_each = var.aks_cluster_ssh_public_key == "" ? [] : [1]
content {
Expand All @@ -55,31 +60,29 @@ resource "azurerm_kubernetes_cluster" "aks" {
dynamic "azure_active_directory_role_based_access_control" {
for_each = var.rbac_aad_enabled ? [1] : []
content {
managed = true
tenant_id = var.rbac_aad_tenant_id
admin_group_object_ids = var.rbac_aad_admin_group_object_ids
azure_rbac_enabled = false
tenant_id = var.rbac_aad_tenant_id
admin_group_object_ids = var.rbac_aad_admin_group_object_ids
azure_rbac_enabled = false
}
}

default_node_pool {
name = "system"
vm_size = var.aks_cluster_node_vm_size
zones = var.aks_availability_zones
enable_auto_scaling = var.aks_cluster_node_auto_scaling
enable_node_public_ip = false
node_labels = {}
node_taints = []
fips_enabled = var.fips_enabled
enable_host_encryption = var.aks_cluster_enable_host_encryption
max_pods = var.aks_cluster_max_pods
os_disk_size_gb = var.aks_cluster_os_disk_size
max_count = var.aks_cluster_max_nodes
min_count = var.aks_cluster_min_nodes
node_count = var.aks_cluster_node_count
vnet_subnet_id = var.aks_vnet_subnet_id
tags = var.aks_cluster_tags
orchestrator_version = var.kubernetes_version
name = "system"
vm_size = var.aks_cluster_node_vm_size
zones = var.aks_availability_zones
auto_scaling_enabled = var.aks_cluster_node_auto_scaling
node_public_ip_enabled = false
node_labels = {}
fips_enabled = var.fips_enabled
host_encryption_enabled = var.aks_cluster_host_encryption_enabled
max_pods = var.aks_cluster_max_pods
os_disk_size_gb = var.aks_cluster_os_disk_size
max_count = var.aks_cluster_max_nodes
min_count = var.aks_cluster_min_nodes
node_count = var.aks_cluster_node_count
vnet_subnet_id = var.aks_vnet_subnet_id
tags = var.aks_cluster_tags
orchestrator_version = var.kubernetes_version
}

dynamic "service_principal" {
Expand Down
14 changes: 1 addition & 13 deletions modules/azure_aks/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ variable "aks_cluster_max_pods" {
default = 110
}

variable "aks_cluster_enable_host_encryption" {
variable "aks_cluster_host_encryption_enabled" {
description = "Enables host encryption on all the nodes in the Default Node Pool"
type = bool
default = false
Expand Down Expand Up @@ -186,18 +186,6 @@ variable "aks_dns_service_ip" {
condition = var.aks_dns_service_ip != null ? can(regex("^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", var.aks_dns_service_ip)) : false
error_message = "ERROR: aks_dns_service_ip - value must not be null and must be a valid IP address."
}

}

variable "aks_docker_bridge_cidr" {
description = "IP address (in CIDR notation) used as the Docker bridge IP address on nodes. Changing this forces a new resource to be created."
type = string
default = "172.17.0.1/16"
validation {
condition = var.aks_docker_bridge_cidr != null ? can(cidrnetmask(var.aks_docker_bridge_cidr)) : false
error_message = "ERROR: aks_docker_bridge_cidr - value must not be null and must be valid CIDR."
}

}

variable "aks_pod_cidr" {
Expand Down
Loading

0 comments on commit b199040

Please sign in to comment.