diff --git a/220_app_routing/README.md b/220_app_routing/README.md index 96b1001..bc56ec5 100644 --- a/220_app_routing/README.md +++ b/220_app_routing/README.md @@ -1,5 +1,19 @@ # AKS Web App Routing demystified +When you expose your AKS applications, you typically use ingress. With ingress, you will need to manage: + +1) Private and public Ingress Controllers +2) DNS custom domain names +3) TLS certificates + +Don't you wish there were a managed service that makes this task easy? +Now that service exists. It is called Application Routing. +Here is how it works. + +![](images/architecture.png) + +Disclaimer: This video is part of my Udemy course: https://www.udemy.com/course/learn-aks-network-security + ```sh az group create -n rg-aks-cluster -l swedencentral diff --git a/220_app_routing/ama-metrics-settings-configmap.yaml b/220_app_routing/ama-metrics-settings-configmap.yaml new file mode 100644 index 0000000..a5aea90 --- /dev/null +++ b/220_app_routing/ama-metrics-settings-configmap.yaml @@ -0,0 +1,58 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: ama-metrics-settings-configmap + namespace: kube-system +data: + schema-version: + #string.used by agent to parse config. supported versions are {v1}. Configs with other schema versions will be rejected by the agent. + v1 + config-version: + #string.used by customer to keep track of this config file's version in their source control/repository (max allowed 10 chars, other chars will be truncated) + ver1 + prometheus-collector-settings: |- + cluster_alias = "" + default-scrape-settings-enabled: |- + kubelet = true + coredns = false + cadvisor = true + kubeproxy = false + apiserver = false + kubestate = true + nodeexporter = true + windowsexporter = false + windowskubeproxy = false + kappiebasic = true + prometheuscollectorhealth = false + # Regex for which namespaces to scrape through pod annotation based scraping. + # This is none by default. Use '.*' to scrape all namespaces of annotated pods. 
+ pod-annotation-based-scraping: |- + podannotationnamespaceregex = ".*" + default-targets-metrics-keep-list: |- + kubelet = "" + coredns = "" + cadvisor = "" + kubeproxy = "" + apiserver = "" + kubestate = "" + nodeexporter = "" + windowsexporter = "" + windowskubeproxy = "" + podannotations = "" + kappiebasic = "" + minimalingestionprofile = true + default-targets-scrape-interval-settings: |- + kubelet = "30s" + coredns = "30s" + cadvisor = "30s" + kubeproxy = "30s" + apiserver = "30s" + kubestate = "30s" + nodeexporter = "30s" + windowsexporter = "30s" + windowskubeproxy = "30s" + kappiebasic = "30s" + prometheuscollectorhealth = "30s" + podannotations = "30s" + debug-mode: |- + enabled = false \ No newline at end of file diff --git a/220_app_routing/app.yaml b/220_app_routing/app.yaml index e992ccc..4dcef37 100644 --- a/220_app_routing/app.yaml +++ b/220_app_routing/app.yaml @@ -1,3 +1,8 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: webapp +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -42,7 +47,6 @@ metadata: spec: ingressClassName: webapprouting.kubernetes.azure.com rules: - # - host: - http: paths: - backend: diff --git a/220_app_routing/images/architecture.png b/220_app_routing/images/architecture.png new file mode 100644 index 0000000..7840cc6 Binary files /dev/null and b/220_app_routing/images/architecture.png differ diff --git a/220_app_routing/images/resources.png b/220_app_routing/images/resources.png new file mode 100644 index 0000000..3521030 Binary files /dev/null and b/220_app_routing/images/resources.png differ diff --git a/220_app_routing/ingress-internal.yaml b/220_app_routing/ingress-internal.yaml new file mode 100644 index 0000000..b2f7dc4 --- /dev/null +++ b/220_app_routing/ingress-internal.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: aks-helloworld-internal + namespace: webapp +spec: + ingressClassName: nginx-internal # webapprouting.kubernetes.azure.com + rules: + - host: 
aks.internal # hello.aks.internal + http: + paths: + - backend: + service: + name: aks-helloworld + port: + number: 80 + path: / + pathType: Prefix \ No newline at end of file diff --git a/220_app_routing/ingress-tls.yaml b/220_app_routing/ingress-tls.yaml index 0d1cd32..ec0e945 100644 --- a/220_app_routing/ingress-tls.yaml +++ b/220_app_routing/ingress-tls.yaml @@ -2,8 +2,7 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: - kubernetes.azure.com/tls-cert-keyvault-uri: "https://kv4aks220.vault.azure.net/certificates/aks-ingress-tls-01" - # kubernetes.azure.com/tls-cert-keyvault-uri: "https://kvakscert01.vault.azure.net/certificates/aks-app-cert" + kubernetes.azure.com/tls-cert-keyvault-uri: "https://kv42aks220.vault.azure.net/certificates/aks-ingress-tls-01" name: aks-helloworld namespace: webapp spec: diff --git a/220_app_routing/terraform/aks.tf b/220_app_routing/terraform/aks.tf index 8cdd571..a80a404 100644 --- a/220_app_routing/terraform/aks.tf +++ b/220_app_routing/terraform/aks.tf @@ -29,10 +29,15 @@ resource "azurerm_kubernetes_cluster" "aks" { } key_vault_secrets_provider { - secret_rotation_enabled = true + secret_rotation_enabled = true secret_rotation_interval = "2m" } + monitor_metrics { + annotations_allowed = null + labels_allowed = null + } + lifecycle { ignore_changes = [ default_node_pool.0.upgrade_settings @@ -42,9 +47,9 @@ resource "azurerm_kubernetes_cluster" "aks" { # Required to create internal Load Balancer resource "azurerm_role_assignment" "network-contributor" { - scope = azurerm_subnet.snet-aks.id - role_definition_name = "Network Contributor" - principal_id = azurerm_kubernetes_cluster.aks.identity.0.principal_id + scope = azurerm_subnet.snet-aks.id + role_definition_name = "Network Contributor" + principal_id = azurerm_kubernetes_cluster.aks.identity.0.principal_id } resource "terraform_data" "aks-get-credentials" { @@ -55,4 +60,4 @@ resource "terraform_data" "aks-get-credentials" { provisioner "local-exec" { 
command = "az aks get-credentials -n ${azurerm_kubernetes_cluster.aks.name} -g ${azurerm_kubernetes_cluster.aks.resource_group_name} --overwrite-existing" } -} \ No newline at end of file +} diff --git a/220_app_routing/terraform/grafana.tf b/220_app_routing/terraform/grafana.tf new file mode 100644 index 0000000..f24992e --- /dev/null +++ b/220_app_routing/terraform/grafana.tf @@ -0,0 +1,41 @@ +resource "azurerm_dashboard_grafana" "grafana" { + name = "azure-grafana-${var.prefix}" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + sku = "Standard" + grafana_major_version = "10" + zone_redundancy_enabled = false + api_key_enabled = true + deterministic_outbound_ip_enabled = true + public_network_access_enabled = true + + azure_monitor_workspace_integrations { + resource_id = azurerm_monitor_workspace.prometheus.id + } + + identity { + type = "SystemAssigned" + } +} + +# data "azurerm_client_config" "current" {} + +resource "azurerm_role_assignment" "role_grafana_admin" { + scope = azurerm_dashboard_grafana.grafana.id + role_definition_name = "Grafana Admin" + principal_id = data.azurerm_client_config.current.object_id +} + +resource "azurerm_role_assignment" "role_monitoring_data_reader" { + scope = azurerm_monitor_workspace.prometheus.id + role_definition_name = "Monitoring Data Reader" + principal_id = azurerm_dashboard_grafana.grafana.identity.0.principal_id +} + +data "azurerm_subscription" "current" {} + +resource "azurerm_role_assignment" "role_monitoring_reader" { + scope = data.azurerm_subscription.current.id + role_definition_name = "Monitoring Reader" + principal_id = azurerm_dashboard_grafana.grafana.identity.0.principal_id +} diff --git a/220_app_routing/terraform/identity_webapprouting.tf b/220_app_routing/terraform/identity_webapprouting.tf index b4f4981..9a08184 100644 --- a/220_app_routing/terraform/identity_webapprouting.tf +++ b/220_app_routing/terraform/identity_webapprouting.tf @@ -1,5 +1,4 
@@ data "azurerm_user_assigned_identity" "webapp_routing" { - # name = "webapprouting-${azurerm_kubernetes_cluster.aks.name}" name = split("/", azurerm_kubernetes_cluster.aks.web_app_routing.0.web_app_routing_identity.0.user_assigned_identity_id)[8] resource_group_name = azurerm_kubernetes_cluster.aks.node_resource_group } diff --git a/220_app_routing/terraform/output.tf b/220_app_routing/terraform/output.tf index 96b2c2a..46e5742 100644 --- a/220_app_routing/terraform/output.tf +++ b/220_app_routing/terraform/output.tf @@ -6,6 +6,10 @@ output "custom_domain_name" { value = azurerm_dns_zone.dns_zone.name } +output "private_domain_name" { + value = azurerm_private_dns_zone.private_dns_zone.name +} + output "keyvault_tls_cert_url" { value = azurerm_key_vault_certificate.aks-ingress-tls-01.versionless_id } \ No newline at end of file diff --git a/220_app_routing/terraform/prometheus-dce.tf b/220_app_routing/terraform/prometheus-dce.tf new file mode 100644 index 0000000..ab72b34 --- /dev/null +++ b/220_app_routing/terraform/prometheus-dce.tf @@ -0,0 +1,14 @@ +resource "azurerm_monitor_data_collection_endpoint" "dce-prometheus" { + name = "dce-prometheus" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + kind = "Linux" + public_network_access_enabled = true +} + +# not required +resource "azurerm_monitor_data_collection_rule_association" "dcra-dce-prometheus-aks" { +# name = "configurationAccessEndpoint" # "dcra-dce-prometheus-aks" # # name is required when data_collection_rule_id is specified. 
And when data_collection_endpoint_id is specified, the name is populated with configurationAccessEndpoint + target_resource_id = azurerm_kubernetes_cluster.aks.id + data_collection_endpoint_id = azurerm_monitor_data_collection_endpoint.dce-prometheus.id +} \ No newline at end of file diff --git a/220_app_routing/terraform/prometheus-dcr.tf b/220_app_routing/terraform/prometheus-dcr.tf new file mode 100644 index 0000000..b0030a0 --- /dev/null +++ b/220_app_routing/terraform/prometheus-dcr.tf @@ -0,0 +1,34 @@ +resource "azurerm_monitor_data_collection_rule" "dcr-prometheus" { + name = "dcr-prometheus" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + data_collection_endpoint_id = azurerm_monitor_data_collection_endpoint.dce-prometheus.id + kind = "Linux" + description = "DCR for Azure Monitor Metrics Profile (Managed Prometheus)" + + data_sources { + prometheus_forwarder { + name = "PrometheusDataSource" + streams = ["Microsoft-PrometheusMetrics"] + } + } + + destinations { + monitor_account { + monitor_account_id = azurerm_monitor_workspace.prometheus.id + name = azurerm_monitor_workspace.prometheus.name + } + } + + data_flow { + streams = ["Microsoft-PrometheusMetrics"] + destinations = [azurerm_monitor_workspace.prometheus.name] + } +} + +resource "azurerm_monitor_data_collection_rule_association" "dcra-dcr-prometheus-aks" { + name = "dcra-dcr-prometheus-aks" + target_resource_id = azurerm_kubernetes_cluster.aks.id + data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr-prometheus.id + description = "Association of DCR. Deleting this association will break the data collection for this AKS Cluster." 
+} diff --git a/220_app_routing/terraform/prometheus-recording-rules.tf b/220_app_routing/terraform/prometheus-recording-rules.tf new file mode 100644 index 0000000..5d717c1 --- /dev/null +++ b/220_app_routing/terraform/prometheus-recording-rules.tf @@ -0,0 +1,211 @@ +resource "azurerm_monitor_alert_prometheus_rule_group" "recording-rules-nodes" { + name = "recording-rules-nodes" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + cluster_name = azurerm_kubernetes_cluster.aks.name + rule_group_enabled = true + interval = "PT1M" + scopes = [azurerm_monitor_workspace.prometheus.id] + + rule { + record = "instance:node_num_cpu:sum" + expression = "count without (cpu, mode) (node_cpu_seconds_total{job=\"node\",mode=\"idle\"})" + enabled = true + } + + rule { + record = "instance:node_cpu_utilisation:rate5m" + expression = "1 - avg without (cpu) (sum without (mode) (rate(node_cpu_seconds_total{job=\"node\", mode=~\"idle|iowait|steal\"}[5m])))" + enabled = true + } + + rule { + record = "instance:node_load1_per_cpu:ratio" + expression = "(node_load1{job=\"node\"}/ instance:node_num_cpu:sum{job=\"node\"})" + enabled = true + } + + rule { + record = "instance:node_memory_utilisation:ratio" + expression = "1 - ((node_memory_MemAvailable_bytes{job=\"node\"} or (node_memory_Buffers_bytes{job=\"node\"} + node_memory_Cached_bytes{job=\"node\"} + node_memory_MemFree_bytes{job=\"node\"} + node_memory_Slab_bytes{job=\"node\"})) / node_memory_MemTotal_bytes{job=\"node\"})" + enabled = true + } + + rule { + record = "instance:node_vmstat_pgmajfault:rate5m" + expression = "rate(node_vmstat_pgmajfault{job=\"node\"}[5m])" + enabled = true + } + + rule { + record = "instance_device:node_disk_io_time_seconds:rate5m" + expression = "rate(node_disk_io_time_seconds_total{job=\"node\", device!=\"\"}[5m])" + enabled = true + } + + rule { + record = "instance_device:node_disk_io_time_weighted_seconds:rate5m" + expression = 
"rate(node_disk_io_time_weighted_seconds_total{job=\"node\", device!=\"\"}[5m])" + enabled = true + } + + rule { + record = "instance:node_network_receive_bytes_excluding_lo:rate5m" + expression = "sum without (device) (rate(node_network_receive_bytes_total{job=\"node\", device!=\"lo\"}[5m]))" + enabled = true + } + + rule { + record = "instance:node_network_transmit_bytes_excluding_lo:rate5m" + expression = "sum without (device) (rate(node_network_transmit_bytes_total{job=\"node\", device!=\"lo\"}[5m]))" + enabled = true + } + + rule { + record = "instance:node_network_receive_drop_excluding_lo:rate5m" + expression = "sum without (device) (rate(node_network_receive_drop_total{job=\"node\", device!=\"lo\"}[5m]))" + enabled = true + } + + rule { + record = "instance:node_network_transmit_drop_excluding_lo:rate5m" + expression = "sum without (device) (rate(node_network_transmit_drop_total{job=\"node\", device!=\"lo\"}[5m]))" + enabled = true + } +} + +resource "azurerm_monitor_alert_prometheus_rule_group" "recording-rules-k8s" { + name = "recording-rules-k8s" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + cluster_name = azurerm_kubernetes_cluster.aks.name + rule_group_enabled = true + interval = "PT1M" + scopes = [azurerm_monitor_workspace.prometheus.id] + + rule { + record = "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate" + expression = "sum by (cluster, namespace, pod, container) (irate(container_cpu_usage_seconds_total{job=\"cadvisor\", image!=\"\"}[5m])) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"}))" + enabled = true + } + + rule { + record = "node_namespace_pod_container:container_memory_working_set_bytes" + expression = "container_memory_working_set_bytes{job=\"cadvisor\", image!=\"\"}* on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, 
node) (kube_pod_info{node!=\"\"}))" + enabled = true + } + + rule { + record = "node_namespace_pod_container:container_memory_rss" + expression = "container_memory_rss{job=\"cadvisor\", image!=\"\"}* on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info{node!=\"\"}))" + enabled = true + } + + rule { + record = "node_namespace_pod_container:container_memory_cache" + expression = "container_memory_cache{job=\"cadvisor\", image!=\"\"}* on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info{node!=\"\"}))" + enabled = true + } + + rule { + record = "node_namespace_pod_container:container_memory_swap" + expression = "container_memory_swap{job=\"cadvisor\", image!=\"\"}* on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info{node!=\"\"}))" + enabled = true + } + + rule { + record = "cluster:namespace:pod_memory:active:kube_pod_container_resource_requests" + expression = "kube_pod_container_resource_requests{resource=\"memory\",job=\"kube-state-metrics\"} * on(namespace, pod, cluster)group_left() max by (namespace, pod, cluster) ((kube_pod_status_phase{phase=~\"Pending|Running\"} == 1))" + enabled = true + } + + rule { + record = "namespace_memory:kube_pod_container_resource_requests:sum" + expression = "sum by (namespace, cluster) (sum by (namespace, pod, cluster) (max by (namespace, pod, container, cluster) (kube_pod_container_resource_requests{resource=\"memory\",job=\"kube-state-metrics\"}) * on(namespace, pod, cluster) group_left() max by (namespace, pod, cluster) (kube_pod_status_phase{phase=~\"Pending|Running\"} == 1)))" + enabled = true + } + + rule { + record = "cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests" + expression = "kube_pod_container_resource_requests{resource=\"cpu\",job=\"kube-state-metrics\"} * on (namespace, pod, cluster)group_left() max by (namespace, pod, cluster) 
((kube_pod_status_phase{phase=~\"Pending|Running\"} == 1))" + enabled = true + } + + rule { + record = "namespace_cpu:kube_pod_container_resource_requests:sum" + expression = "sum by (namespace, cluster) (sum by(namespace, pod, cluster) (max by(namespace, pod, container, cluster) (kube_pod_container_resource_requests{resource=\"cpu\",job=\"kube-state-metrics\"}) * on(namespace, pod, cluster) group_left() max by (namespace, pod, cluster) (kube_pod_status_phase{phase=~\"Pending|Running\"} == 1)))" + enabled = true + } + + rule { + record = "cluster:namespace:pod_memory:active:kube_pod_container_resource_limits" + expression = "kube_pod_container_resource_limits{resource=\"memory\",job=\"kube-state-metrics\"} * on (namespace, pod, cluster)group_left() max by (namespace, pod, cluster) ((kube_pod_status_phase{phase=~\"Pending|Running\"} == 1))" + enabled = true + } + + rule { + record = "namespace_memory:kube_pod_container_resource_limits:sum" + expression = "sum by (namespace, cluster) (sum by (namespace, pod, cluster) (max by (namespace, pod, container, cluster) (kube_pod_container_resource_limits{resource=\"memory\",job=\"kube-state-metrics\"}) * on(namespace, pod, cluster) group_left() max by (namespace, pod, cluster) (kube_pod_status_phase{phase=~\"Pending|Running\"} == 1)))" + enabled = true + } + + rule { + record = "cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits" + expression = "kube_pod_container_resource_limits{resource=\"cpu\",job=\"kube-state-metrics\"} * on (namespace, pod, cluster)group_left() max by (namespace, pod, cluster) ( (kube_pod_status_phase{phase=~\"Pending|Running\"} == 1) )" + enabled = true + } + + rule { + record = "namespace_cpu:kube_pod_container_resource_limits:sum" + expression = "sum by (namespace, cluster) (sum by (namespace, pod, cluster) (max by(namespace, pod, container, cluster) (kube_pod_container_resource_limits{resource=\"cpu\",job=\"kube-state-metrics\"}) * on(namespace, pod, cluster) group_left() max by 
(namespace, pod, cluster) (kube_pod_status_phase{phase=~\"Pending|Running\"} == 1)))" + enabled = true + } + + rule { + record = "namespace_workload_pod:kube_pod_owner:relabel" + expression = "max by (cluster, namespace, workload, pod) (label_replace(label_replace(kube_pod_owner{job=\"kube-state-metrics\", owner_kind=\"ReplicaSet\"}, \"replicaset\", \"$1\", \"owner_name\", \"(.*)\") * on(replicaset, namespace) group_left(owner_name) topk by(replicaset, namespace) (1, max by (replicaset, namespace, owner_name) (kube_replicaset_owner{job=\"kube-state-metrics\"})), \"workload\", \"$1\", \"owner_name\", \"(.*)\"))" + labels = { + "workload_type" = "deployment" + } + enabled = true + } + + rule { + record = "namespace_workload_pod:kube_pod_owner:relabel" + expression = "max by (cluster, namespace, workload, pod) (label_replace(kube_pod_owner{job=\"kube-state-metrics\", owner_kind=\"DaemonSet\"}, \"workload\", \"$1\", \"owner_name\", \"(.*)\"))" + labels = { + "workload_type" = "daemonset" + } + enabled = true + } + + rule { + record = "namespace_workload_pod:kube_pod_owner:relabel" + expression = "max by (cluster, namespace, workload, pod) (label_replace(kube_pod_owner{job=\"kube-state-metrics\", owner_kind=\"StatefulSet\"}, \"workload\", \"$1\", \"owner_name\", \"(.*)\"))" + labels = { + "workload_type" = "statefulset" + } + enabled = true + } + + rule { + record = "namespace_workload_pod:kube_pod_owner:relabel" + expression = "max by (cluster, namespace, workload, pod) (label_replace(kube_pod_owner{job=\"kube-state-metrics\", owner_kind=\"Job\"}, \"workload\", \"$1\", \"owner_name\", \"(.*)\"))" + labels = { + "workload_type" = "job" + } + enabled = true + } + + rule { + record = ":node_memory_MemAvailable_bytes:sum" + expression = "sum(node_memory_MemAvailable_bytes{job=\"node\"} or (node_memory_Buffers_bytes{job=\"node\"} + node_memory_Cached_bytes{job=\"node\"} + node_memory_MemFree_bytes{job=\"node\"} + node_memory_Slab_bytes{job=\"node\"})) by (cluster)" + 
enabled = true + } + + rule { + record = "cluster:node_cpu:ratio_rate5m" + expression = "sum(rate(node_cpu_seconds_total{job=\"node\",mode!=\"idle\",mode!=\"iowait\",mode!=\"steal\"}[5m])) by (cluster) /count(sum(node_cpu_seconds_total{job=\"node\"}) by (cluster, instance, cpu)) by (cluster)" + enabled = true + } +} \ No newline at end of file diff --git a/220_app_routing/terraform/prometheus.tf b/220_app_routing/terraform/prometheus.tf new file mode 100644 index 0000000..94f3621 --- /dev/null +++ b/220_app_routing/terraform/prometheus.tf @@ -0,0 +1,12 @@ +resource "azurerm_monitor_workspace" "prometheus" { + name = "azure-prometheus" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + public_network_access_enabled = true +} + +resource "azurerm_role_assignment" "role_monitoring_data_reader_me" { + scope = azurerm_monitor_workspace.prometheus.id + role_definition_name = "Monitoring Data Reader" + principal_id = data.azurerm_client_config.current.object_id +} \ No newline at end of file diff --git a/220_app_routing/terraform/providers.tf b/220_app_routing/terraform/providers.tf index c7725c9..688f005 100644 --- a/220_app_routing/terraform/providers.tf +++ b/220_app_routing/terraform/providers.tf @@ -6,7 +6,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" - version = "= 3.99.0" + version = "= 3.100.0" } azapi = { @@ -22,10 +22,14 @@ provider "azurerm" { prevent_deletion_if_contains_resources = false } key_vault { - purge_soft_delete_on_destroy = true - recover_soft_deleted_key_vaults = true - purge_soft_deleted_secrets_on_destroy = true - recover_soft_deleted_secrets = true + purge_soft_delete_on_destroy = true + purge_soft_deleted_secrets_on_destroy = true + purge_soft_deleted_keys_on_destroy = true + purge_soft_deleted_certificates_on_destroy = true + recover_soft_deleted_key_vaults = true + recover_soft_deleted_secrets = true + recover_soft_deleted_certificates = true + recover_soft_deleted_keys = true 
} } } diff --git a/220_app_routing/terraform/tbd.tf b/220_app_routing/terraform/tbd.tf new file mode 100644 index 0000000..213a9cf --- /dev/null +++ b/220_app_routing/terraform/tbd.tf @@ -0,0 +1,36 @@ +resource "azurerm_key_vault" "keyvault2" { + name = "kv42aks${var.prefix}" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + tenant_id = data.azurerm_client_config.current.tenant_id + soft_delete_retention_days = 7 + purge_protection_enabled = false + enabled_for_disk_encryption = false + public_network_access_enabled = true + sku_name = "standard" + enable_rbac_authorization = true +} + +resource "azurerm_key_vault_certificate" "aks-ingress-tls-012" { + name = "aks-ingress-tls-01" + key_vault_id = azurerm_key_vault.keyvault2.id + + certificate { + contents = filebase64("../cert/aks-ingress-tls.pfx") + password = "" + } + + depends_on = [ azurerm_role_assignment.keyvault-secrets-officer ] +} + +resource "azurerm_role_assignment" "keyvault-secrets-officer2" { + scope = azurerm_key_vault.keyvault2.id + role_definition_name = "Key Vault Secrets Officer" + principal_id = data.azurerm_client_config.current.object_id +} + +resource "azurerm_role_assignment" "key-vault-secrets-user2" { + scope = azurerm_key_vault.keyvault2.id + role_definition_name = "Key Vault Secrets User" + principal_id = data.azurerm_user_assigned_identity.webapp_routing.principal_id +} \ No newline at end of file