diff --git a/.infracost/pricing.gob b/.infracost/pricing.gob index ce9e7d2..b000c02 100644 Binary files a/.infracost/pricing.gob and b/.infracost/pricing.gob differ diff --git a/57_filter_egress_traffic_fqdn/commands.ps1 b/57_filter_egress_traffic_fqdn/commands.ps1 index 6baf868..11da3d9 100644 --- a/57_filter_egress_traffic_fqdn/commands.ps1 +++ b/57_filter_egress_traffic_fqdn/commands.ps1 @@ -11,14 +11,15 @@ az aks create -n $AKS_NAME -g $RG_NAME --network-plugin none az aks get-credentials -n $AKS_NAME -g $RG_NAME --overwrite-existing helm repo add cilium https://helm.cilium.io/ +helm repo update -helm upgrade --install cilium cilium/cilium --version 1.14.2 ` +helm upgrade --install cilium cilium/cilium --version 1.15.1 ` --namespace kube-system ` --set aksbyocni.enabled=true ` --set nodeinit.enabled=true ` --set sctp.enabled=true ` --set hubble.enabled=true ` - --set hubble.metrics.enabled="{dns,drop,tcp,flow,icmp,http}" ` + --set hubble.metrics.enabled="{dns,drop,tcp,flow,icmp,http,port-distribution,labelsContext=source_ip\,source_namespace\,source_workload\,destination_ip\,destination_namespace\,destination_workload\,traffic_direction}" ` --set hubble.relay.enabled=true ` --set hubble.ui.enabled=true ` --set hubble.ui.service.type=NodePort ` @@ -26,7 +27,7 @@ helm upgrade --install cilium cilium/cilium --version 1.14.2 ` # --set gatewayAPI.enabled=true # Restart unmanaged Pods (required by new Cilium install) -kubectl delete pods -A -all +kubectl delete pods -A --all # kubectl get pods --all-namespaces -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true | grep '<none>' | awk '{print "-n "$1" "$2}' | xargs -L 1 -r kubectl delete pod # make sure cilium CLI is installed on your machine (https://github.com/cilium/cilium-cli/releases/tag/v0.15.0) @@ -39,7 +40,7 @@ cilium status --wait cilium connectivity test # deploy sample online service, just to get public IP -$FQDN=(az container create -g $RG_NAME -n aci-app --image nginx:latest --ports 80 --ip-address public --dns-name-label aci-app-931 --query ipAddress.fqdn --output tsv) +$FQDN=(az container create -g $RG_NAME -n aci-app --image nginx:latest --ports 80 --ip-address public --dns-name-label aci-app-931 --query ipAddress.fqdn --output tsv) $FQDN # aci-app-931.westeurope.azurecontainer.io diff --git a/80_aks_backup_tf/Readme.md b/80_aks_backup_tf/Readme.md new file mode 100644 index 0000000..7fcc9e0 --- /dev/null +++ b/80_aks_backup_tf/Readme.md @@ -0,0 +1,40 @@ +# AKS Backup using Azure Backup vault + +## Introduction + +With AKS, you can use `Azure Backup` to back up and restore the cluster's Kubernetes resources and persistent volumes. +It relies on the `Backup extension` installed into the cluster and a `Backup vault`. + +This lab will provide a Terraform implementation for AKS backup. + +## Architecture + +![](images/architecture.png) + +## Deploying the resources using Terraform + +To deploy the Terraform configuration files, run the following commands: + +```sh +terraform init + +terraform plan -out tfplan + +terraform apply tfplan +``` + +The following resources will be created.
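+
+You can list them with the Azure CLI; the resource group names below assume the default `prefix = "80"` set in `variables.tf`:
+
+```sh
+az resource list -g rg-akscluster-80 -o table
+az resource list -g rg-aks-backup-80 -o table
+```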
+ +![](images/resources.png) + +## Cleanup resources + +To delete the created resources, run the following command: + +```sh +terraform destroy +``` + +## More readings + +https://learn.microsoft.com/en-us/azure/backup/azure-kubernetes-service-backup-overview diff --git a/80_aks_backup_tf/aks-backup-extenstion.tf b/80_aks_backup_tf/aks-backup-extenstion.tf new file mode 100644 index 0000000..f2793b7 --- /dev/null +++ b/80_aks_backup_tf/aks-backup-extenstion.tf @@ -0,0 +1,22 @@ +resource "azurerm_kubernetes_cluster_extension" "extension" { + name = "backup-extension" + cluster_id = azurerm_kubernetes_cluster.aks.id + extension_type = "Microsoft.DataProtection.Kubernetes" + release_train = "stable" + release_namespace = "dataprotection-microsoft" + configuration_settings = { + "configuration.backupStorageLocation.bucket" = azurerm_storage_container.container.name + "configuration.backupStorageLocation.config.resourceGroup" = azurerm_storage_account.storage.resource_group_name + "configuration.backupStorageLocation.config.storageAccount" = azurerm_storage_account.storage.name + "configuration.backupStorageLocation.config.subscriptionId" = data.azurerm_client_config.current.subscription_id + "credentials.tenantId" = data.azurerm_client_config.current.tenant_id + } +} + +resource "azurerm_role_assignment" "extension_and_storage_account_permission" { + scope = azurerm_storage_account.storage.id + role_definition_name = "Storage Account Contributor" + principal_id = azurerm_kubernetes_cluster_extension.extension.aks_assigned_identity[0].principal_id +} + +data "azurerm_client_config" "current" {} \ No newline at end of file diff --git a/80_aks_backup_tf/aks.tf b/80_aks_backup_tf/aks.tf new file mode 100644 index 0000000..5e30630 --- /dev/null +++ b/80_aks_backup_tf/aks.tf @@ -0,0 +1,29 @@ +resource "azurerm_kubernetes_cluster" "aks" { + name = "aks-cluster" + location = azurerm_resource_group.rg.location + resource_group_name = azurerm_resource_group.rg.name + dns_prefix = "aks" + kubernetes_version = "1.29.0" + + network_profile { + network_plugin = "azure" + network_plugin_mode = "overlay" + ebpf_data_plane = "cilium" + } + + default_node_pool { + name = "systempool" + node_count = 3 + vm_size = "standard_b2als_v2" + } + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_role_assignment" "cluster_msi_contributor_on_snap_rg" { + scope = azurerm_resource_group.rg-backup.id + role_definition_name = "Contributor" + principal_id = azurerm_kubernetes_cluster.aks.identity[0].principal_id +} \ No newline at end of file diff --git a/80_aks_backup_tf/backup_instance.tf b/80_aks_backup_tf/backup_instance.tf new file mode 100644 index 0000000..0b390f7 --- /dev/null +++ b/80_aks_backup_tf/backup_instance.tf @@ -0,0 +1,22 @@ +resource "azurerm_data_protection_backup_instance_kubernetes_cluster" "backup-instance" { + name = "backup-instance" + location = azurerm_resource_group.rg.location + vault_id = azurerm_data_protection_backup_vault.backup-vault.id + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id + snapshot_resource_group_name = azurerm_resource_group.rg-backup.name + backup_policy_id = azurerm_data_protection_backup_policy_kubernetes_cluster.backup-policy-aks.id + + backup_datasource_parameters { + excluded_namespaces = ["test-excluded-namespaces"] + excluded_resource_types = ["exvolumesnapshotcontents.snapshot.storage.k8s.io"] + cluster_scoped_resources_enabled = true + included_namespaces = ["*"] # ["test-included-namespaces"] + 
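# "*" means all; replace these wildcards with explicit lists (like the commented examples) to narrow the backup scope + 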
included_resource_types = ["*"] # ["involumesnapshotcontents.snapshot.storage.k8s.io"] + label_selectors = ["*"] # ["kubernetes.io/metadata.name:test"] + volume_snapshot_enabled = true + } + + depends_on = [ + azurerm_role_assignment.extension_and_storage_account_permission, + ] +} \ No newline at end of file diff --git a/80_aks_backup_tf/backup_policy.tf b/80_aks_backup_tf/backup_policy.tf new file mode 100644 index 0000000..50e7162 --- /dev/null +++ b/80_aks_backup_tf/backup_policy.tf @@ -0,0 +1,31 @@ +resource "azurerm_data_protection_backup_policy_kubernetes_cluster" "backup-policy-aks" { + name = "backup-policy-aks" + resource_group_name = azurerm_data_protection_backup_vault.backup-vault.resource_group_name + vault_name = azurerm_data_protection_backup_vault.backup-vault.name + + backup_repeating_time_intervals = ["R/2023-05-23T02:30:00+00:00/P1W"] + + retention_rule { + name = "Daily" + priority = 25 + + life_cycle { + duration = "P84D" + data_store_type = "OperationalStore" + } + + criteria { + days_of_week = ["Thursday"] + months_of_year = ["November"] + weeks_of_month = ["First"] + scheduled_backup_times = ["2023-05-23T02:30:00Z"] + } + } + + default_retention_rule { + life_cycle { + duration = "P14D" + data_store_type = "OperationalStore" + } + } +} \ No newline at end of file diff --git a/80_aks_backup_tf/backup_vault.tf b/80_aks_backup_tf/backup_vault.tf new file mode 100644 index 0000000..77bf993 --- /dev/null +++ b/80_aks_backup_tf/backup_vault.tf @@ -0,0 +1,23 @@ +resource "azurerm_data_protection_backup_vault" "backup-vault" { + name = "backup-vault" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_role_assignment" "vault_msi_read_on_cluster" { + scope = azurerm_kubernetes_cluster.aks.id + role_definition_name = "Reader" + principal_id = azurerm_data_protection_backup_vault.backup-vault.identity[0].principal_id +} + +resource "azurerm_role_assignment" "vault_msi_read_on_snap_rg" { + scope = azurerm_resource_group.rg-backup.id + role_definition_name = "Reader" + principal_id = azurerm_data_protection_backup_vault.backup-vault.identity[0].principal_id +} \ No newline at end of file diff --git a/80_aks_backup_tf/output.tf b/80_aks_backup_tf/output.tf new file mode 100644 index 0000000..e69de29 diff --git a/80_aks_backup_tf/providers.tf b/80_aks_backup_tf/providers.tf new file mode 100644 index 0000000..9f4bf2a --- /dev/null +++ b/80_aks_backup_tf/providers.tf @@ -0,0 +1,25 @@ +terraform { + + required_version = ">= 1.2.8" + + required_providers { + + azurerm = { + source = "hashicorp/azurerm" + version = "= 3.95.0" + } + + time = { + source = "hashicorp/time" + version = "0.10.0" + } + } +} + +provider "azurerm" { + features {} +} + +provider "time" { + # Configuration options +} diff --git a/80_aks_backup_tf/rg.tf b/80_aks_backup_tf/rg.tf new file mode 100644 index 0000000..db048fa --- /dev/null +++ b/80_aks_backup_tf/rg.tf @@ -0,0 +1,9 @@ +resource "azurerm_resource_group" "rg" { + name = "rg-akscluster-${var.prefix}" + location = var.location +} + +resource "azurerm_resource_group" "rg-backup" { + name = "rg-aks-backup-${var.prefix}" + location = var.location +} \ No newline at end of file diff --git a/80_aks_backup_tf/storage_account.tf b/80_aks_backup_tf/storage_account.tf new file mode 100644 index 0000000..4caaf58 --- /dev/null +++ b/80_aks_backup_tf/storage_account.tf @@ -0,0 +1,13 
@@ +resource "azurerm_storage_account" "storage" { + name = "storage19753" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_container" "container" { + name = "backup-container" + storage_account_name = azurerm_storage_account.storage.name + container_access_type = "private" +} \ No newline at end of file diff --git a/80_aks_backup_tf/trusted_access.tf b/80_aks_backup_tf/trusted_access.tf new file mode 100644 index 0000000..4d20535 --- /dev/null +++ b/80_aks_backup_tf/trusted_access.tf @@ -0,0 +1,6 @@ +resource "azurerm_kubernetes_cluster_trusted_access_role_binding" "aks_cluster_trusted_access" { + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id + name = "trusted-access" + roles = ["Microsoft.DataProtection/backupVaults/backup-operator"] + source_resource_id = azurerm_data_protection_backup_vault.backup-vault.id +} \ No newline at end of file diff --git a/80_aks_backup_tf/variables.tf b/80_aks_backup_tf/variables.tf new file mode 100644 index 0000000..462f1c9 --- /dev/null +++ b/80_aks_backup_tf/variables.tf @@ -0,0 +1,7 @@ +variable "prefix" { + default = "80" +} + +variable "location" { + default = "swedencentral" +} \ No newline at end of file diff --git a/85_prometheus_grafana/.infracost/pricing.gob b/85_prometheus_grafana/.infracost/pricing.gob new file mode 100644 index 0000000..2f90f08 Binary files /dev/null and b/85_prometheus_grafana/.infracost/pricing.gob differ diff --git a/85_prometheus_grafana/providers.tf b/85_prometheus_grafana/providers.tf index d1de421..9f4bf2a 100644 --- a/85_prometheus_grafana/providers.tf +++ b/85_prometheus_grafana/providers.tf @@ -6,7 +6,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" - version = "= 3.94.0" + version = "= 3.95.0" } time = { diff --git a/88_prometheus_grafana_ampls/.infracost/pricing.gob b/88_prometheus_grafana_ampls/.infracost/pricing.gob new file mode 100644 index 0000000..9cc8866 Binary files /dev/null and b/88_prometheus_grafana_ampls/.infracost/pricing.gob differ diff --git a/88_prometheus_grafana_ampls/.infracost/terraform_modules/manifest.json b/88_prometheus_grafana_ampls/.infracost/terraform_modules/manifest.json index 5d95a8b..7b5be91 100644 --- a/88_prometheus_grafana_ampls/.infracost/terraform_modules/manifest.json +++ b/88_prometheus_grafana_ampls/.infracost/terraform_modules/manifest.json @@ -1 +1 @@ -{"Path":"d:\\Projects\\docker-kubernetes-course\\85_prometheus_grafana","Version":"2.0","Modules":[]} \ No newline at end of file +{"Path":"d:\\Projects\\docker-kubernetes-course\\88_prometheus_grafana_ampls","Version":"2.0","Modules":[]} \ No newline at end of file diff --git a/88_prometheus_grafana_ampls/grafana-mpe.tf b/88_prometheus_grafana_ampls/grafana-mpe.tf index 6589374..3bb5d30 100644 --- a/88_prometheus_grafana_ampls/grafana-mpe.tf +++ b/88_prometheus_grafana_ampls/grafana-mpe.tf @@ -21,15 +21,19 @@ data "azapi_resource_list" "mpe-grafana" { type = "Microsoft.Monitor/accounts/privateEndpointConnections@2023-04-03" parent_id = azurerm_monitor_workspace.prometheus.id response_export_values = ["*"] + depends_on = [azapi_resource.mpe-grafana] } # Retrieve the Grafana's Managed Private Endpoint ID locals { - mpe-grafana-id = element([for pe in jsondecode(data.azapi_resource_list.mpe-grafana.output).value : pe.id if pe.properties.privateLinkServiceConnectionState.status == "Pending"], 0) # strcontains(pe.id, azapi_resource.mpe-grafana.name)], 0) + # 
mpe-grafana-id = element([for pe in jsondecode(data.azapi_resource_list.mpe-grafana.output).value : pe.id if pe.properties.privateLinkServiceConnectionState.status == "Pending"], 0) + # mpe-grafana-id = element([for pe in jsondecode(data.azapi_resource_list.mpe-grafana.output).value : pe.id if strcontains(pe.id, azapi_resource.mpe-grafana.name)], 0) + mpe-grafana-id = try(element([for pe in jsondecode(data.azapi_resource_list.mpe-grafana.output).value : pe.id if pe.properties.privateLinkServiceConnectionState.status == "Pending"], 0), null) } # Approve Grafana's Managed Private Endpoint connection to Prometheus resource "azapi_update_resource" "approve-mpe-grafana" { + count = local.mpe-grafana-id != null ? 1 : 0 type = "Microsoft.Monitor/accounts/privateEndpointConnections@2023-04-03" resource_id = local.mpe-grafana-id diff --git a/88_prometheus_grafana_ampls/apps/ama-metrics-settings-configmap.yaml b/88_prometheus_grafana_ampls/k8s/ama-metrics-settings-configmap.yaml similarity index 100% rename from 88_prometheus_grafana_ampls/apps/ama-metrics-settings-configmap.yaml rename to 88_prometheus_grafana_ampls/k8s/ama-metrics-settings-configmap.yaml diff --git a/88_prometheus_grafana_ampls/apps/container-azm-ms-agentconfig.yaml b/88_prometheus_grafana_ampls/k8s/container-azm-ms-agentconfig.yaml similarity index 100% rename from 88_prometheus_grafana_ampls/apps/container-azm-ms-agentconfig.yaml rename to 88_prometheus_grafana_ampls/k8s/container-azm-ms-agentconfig.yaml diff --git a/88_prometheus_grafana_ampls/apps/deploy-svc-ingress.yaml b/88_prometheus_grafana_ampls/k8s/deploy-svc-ingress.yaml similarity index 100% rename from 88_prometheus_grafana_ampls/apps/deploy-svc-ingress.yaml rename to 88_prometheus_grafana_ampls/k8s/deploy-svc-ingress.yaml diff --git a/88_prometheus_grafana_ampls/apps/diagnostic_setting.tf b/88_prometheus_grafana_ampls/k8s/diagnostic_setting.tf similarity index 100% rename from 88_prometheus_grafana_ampls/apps/diagnostic_setting.tf rename to 88_prometheus_grafana_ampls/k8s/diagnostic_setting.tf diff --git a/88_prometheus_grafana_ampls/apps/import_grafafana_dashboard.tf b/88_prometheus_grafana_ampls/k8s/import_grafafana_dashboard.tf similarity index 100% rename from 88_prometheus_grafana_ampls/apps/import_grafafana_dashboard.tf rename to 88_prometheus_grafana_ampls/k8s/import_grafafana_dashboard.tf diff --git a/88_prometheus_grafana_ampls/apps/ingress-nginx.tf b/88_prometheus_grafana_ampls/k8s/ingress-nginx.tf similarity index 100% rename from 88_prometheus_grafana_ampls/apps/ingress-nginx.tf rename to 88_prometheus_grafana_ampls/k8s/ingress-nginx.tf diff --git a/88_prometheus_grafana_ampls/apps/logger-pod.yaml b/88_prometheus_grafana_ampls/k8s/logger-pod.yaml similarity index 100% rename from 88_prometheus_grafana_ampls/apps/logger-pod.yaml rename to 88_prometheus_grafana_ampls/k8s/logger-pod.yaml diff --git a/88_prometheus_grafana_ampls/log_analytics-dcr.tf b/88_prometheus_grafana_ampls/log_analytics-dcr.tf index 75f04f2..858fdce 100644 --- a/88_prometheus_grafana_ampls/log_analytics-dcr.tf +++ b/88_prometheus_grafana_ampls/log_analytics-dcr.tf @@ -3,6 +3,7 @@ resource "azurerm_monitor_data_collection_rule" "dcr-log-analytics" { resource_group_name = azurerm_resource_group.rg.name location = azurerm_resource_group.rg.location data_collection_endpoint_id = azurerm_monitor_data_collection_endpoint.dce-log-analytics.id + depends_on = [time_sleep.wait_60_seconds] destinations { log_analytics { @@ -18,10 +19,10 @@ resource "azurerm_monitor_data_collection_rule" 
"dcr-log-analytics" { data_sources { syslog { - name = "demo-syslog" - facility_names = [ "*" ] - log_levels = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency", ] - streams = [ "Microsoft-Syslog" ] + name = "demo-syslog" + facility_names = ["*"] + log_levels = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency", ] + streams = ["Microsoft-Syslog"] } extension { extension_name = "ContainerInsights" @@ -33,7 +34,7 @@ resource "azurerm_monitor_data_collection_rule" "dcr-log-analytics" { enableContainerLogV2 = true interval = "1m" namespaceFilteringMode = "Include" # "Exclude" "Off" - namespaces = ["kube-system", "default"] + namespaces = ["kube-system", "default"] enableContainerLogV2 = true } } @@ -42,6 +43,13 @@ resource "azurerm_monitor_data_collection_rule" "dcr-log-analytics" { } } +# DCR creation should be started about 60 seconds after the Log Analytics workspace is created +# This is a workaround, could be fixed in the future +resource "time_sleep" "wait_60_seconds" { + create_duration = "60s" + depends_on = [azurerm_log_analytics_workspace.workspace] +} + resource "azurerm_monitor_data_collection_rule_association" "dcra-dcr-log-analytics-aks" { name = "dcra-dcr-log-analytics-aks" target_resource_id = azurerm_kubernetes_cluster.aks.id diff --git a/88_prometheus_grafana_ampls/providers.tf b/88_prometheus_grafana_ampls/providers.tf index af0c126..56e947c 100644 --- a/88_prometheus_grafana_ampls/providers.tf +++ b/88_prometheus_grafana_ampls/providers.tf @@ -6,7 +6,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" - version = "= 3.94.0" + version = "= 3.95.0" } azuread = { diff --git a/_egress_proxy/README.md b/_egress_proxy/README.md new file mode 100644 index 0000000..4cf3d7b --- /dev/null +++ b/_egress_proxy/README.md @@ -0,0 +1,12 @@ +You can use your own (leaf) certificate by passing the --certs [domain=]path_to_certificate option to mitmproxy. Mitmproxy then uses the provided certificate for interception of the specified domain instead of generating a certificate signed by its own CA. + +The certificate file is expected to be in the PEM format. + +You can generate a certificate in this format using these instructions: + +```sh +openssl genrsa -out cert.key 2048 +# (Specify the mitm domain as Common Name, e.g. 
\*.google.com) +openssl req -new -x509 -key cert.key -out cert.crt +cat cert.key cert.crt > cert.pem +``` \ No newline at end of file diff --git a/_egress_proxy/aci-mitmproxy.tf b/_egress_proxy/aci-mitmproxy.tf new file mode 100644 index 0000000..3df5e1c --- /dev/null +++ b/_egress_proxy/aci-mitmproxy.tf @@ -0,0 +1,54 @@ +resource "azurerm_container_group" "aci-mitmproxy" { + name = "aci-mitmproxy" + location = azurerm_resource_group.rg.location + resource_group_name = azurerm_resource_group.rg.name + ip_address_type = "Public" + os_type = "Linux" + + container { + name = "mitmproxy" + image = "mitmproxy/mitmproxy:latest" + cpu = "1.0" + memory = "1.0" + + commands = [ + "/bin/bash", + "-c", + "mitmweb --listen-port 8080 --web-host 0.0.0.0 --web-port 8081 --set block_global=false" + ] + + ports { + port = 8080 + protocol = "TCP" + } + + ports { + port = 8081 + protocol = "TCP" + } + } + + exposed_port = [ + { + port = 8080 + protocol = "TCP" + }, + { + port = 8081 + protocol = "TCP" + }] +} + +resource "terraform_data" "aci-mitmproxy-get-certificate" { + triggers_replace = [ + azurerm_container_group.aci-mitmproxy.id + ] + + provisioner "local-exec" { + command = "az container exec -g ${azurerm_container_group.aci-mitmproxy.resource_group_name} --name ${azurerm_container_group.aci-mitmproxy.name} --exec-command 'cat ~/.mitmproxy/mitmproxy-ca-cert.pem | base64'" + } +} + +output "aci-mitmproxy-public_ip" { + value = azurerm_container_group.aci-mitmproxy.ip_address +} diff --git a/_egress_proxy/aks-proxy-config.json b/_egress_proxy/aks-proxy-config.json new file mode 100644 index 0000000..6c4185d --- /dev/null +++ b/_egress_proxy/aks-proxy-config.json @@ -0,0 +1,9 @@ +{ + "httpProxy": "http://20.76.37.30:8080/", + "httpsProxy": "https://20.76.37.30:8080/", + "noProxy": [ + "localhost", + "127.0.0.1" + ], + "trustedCA": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUROVENDQWgyZ0F3SUJBZ0lVRmVpUGVORDkweFNtNXlRWVJnT1E1cUplOEdjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tERVNNQkFHQTFVRUF3d0piV2wwYlhCeWIzaDVNUkl3RUFZRFZRUUtEQWx0YVhSdGNISnZlSGt3SGhjTgpNalF3TXpFek1EY3pPVFF4V2hjTk16UXdNekV6TURjek9UUXhXakFvTVJJd0VBWURWUVFEREFsdGFYUnRjSEp2CmVIa3hFakFRQmdOVkJBb01DVzFwZEcxd2NtOTRlVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTWJXVnRTKzM5aVd2RFc4bWFVVFhSYi82WlJvN1ZZZ3J2RVpwTFRSOFRIUUpUS3FGT0s0MFVzMgpzL0NZZUVqa2F4WWZFVGZJVGc4YURKVGJGaVpSZElGUklSM1Y5NWJsazV0bVFCb2kzaTVNb0Jhd05sTzgxL0V2CkZRZUZoZlkvdWJja0R4K1ZDRUxNVy9pSEFPUFRpYjRkbE94Y0gxQ3lEVi80TEVnc3JIL2tXN2hJWVo1bXRMblMKcFdHb1FqVlUvVW9tUnNaOXBNb2g1Y1pvbCtlZFNnY1ZlNzdOUiswYWwrdWYzYWpvYnkrcnZZSGdnbjg4bnYrKwpENGpUa01DVUkwMy81KzVxelhaUzR5VDNQbHd1SzA0WEZjMTVDTVJQY3gza3p0ekVYcHIxSDhXMm44QndCMzNiCnYxTnN5WXRGbnNXcFZmVFZKTG5zQWE1NDNTNHZzck1DQXdFQUFhTlhNRlV3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0hRWURWUjBPQkJZRQpGRyt0NGp4NGovWjQyWjNGZW9zdHpOOWk5NFdjTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDcGZLRlkyRnJOClFoMEo4cENSanE5UGdOekh3N3YzbWpnQTdINXlRWEpDK0VTdmkrMmJiT2NkNTNjdGo1TlpkN3ZjcWVmT3JtRS8KWXNvcElHa0l5dDdqdzV3d1FSNDFDVEhRbjRQTXA5akEySkthWnZqT1I0US9lUGlZSGgxSUozSklUQ25KYVNBRQpIbURTbjVybXlMZlVNSHFKVTdSKzFabDUyNUF4Vk9YeEdDMUZteElTWWhqNG1IcXVwRlBtMm15N3NOYWY2Nm03CjQreW5LZ2ttS3FyS3htZ0pFZk5zK1FPanpGWFlKTzBxczFwNXRJOUszYXF5QnBnVzU5cEJHa0dRMkVLMWduSXMKdlA1K3JjUUV0dTcrcWJGT08yYnByamJlZ1VGbkxuSXlzem5SOHRLdThzWU9vL2pGOFdZNVJsQ1pqQnNvNGxlQgo0MUxLUS9lTU84MjQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" +} \ No newline at end of file diff --git a/_egress_proxy/aks.tf b/_egress_proxy/aks.tf new file mode 100644 index 0000000..18e36bb --- 
/dev/null +++ b/_egress_proxy/aks.tf @@ -0,0 +1,65 @@ +resource "azurerm_kubernetes_cluster" "aks" { + name = "aks-cluster1" + location = azurerm_resource_group.rg.location + resource_group_name = azurerm_resource_group.rg.name + dns_prefix = "aks" + kubernetes_version = "1.29.0" + + network_profile { + network_plugin = "azure" + network_plugin_mode = "overlay" + ebpf_data_plane = "cilium" + outbound_type = "loadBalancer" + } + + default_node_pool { + name = "systempool" + node_count = 3 + vm_size = "standard_b2als_v2" + vnet_subnet_id = azurerm_subnet.snet-aks.id + } + + identity { + type = "SystemAssigned" + } + + http_proxy_config { + http_proxy = "http://${azurerm_container_group.aci-mitmproxy.ip_address}:8080/" # "http://20.76.37.30:8080/" + https_proxy = "http://${azurerm_container_group.aci-mitmproxy.ip_address}:8080/" # "http://20.76.37.30:8080/" + no_proxy = ["localhost","127.0.0.1"] #, azurerm_subnet.snet-aks.address_prefixes[0]] + trusted_ca = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUROVENDQWgyZ0F3SUJBZ0lVRmVpUGVORDkweFNtNXlRWVJnT1E1cUplOEdjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tERVNNQkFHQTFVRUF3d0piV2wwYlhCeWIzaDVNUkl3RUFZRFZRUUtEQWx0YVhSdGNISnZlSGt3SGhjTgpNalF3TXpFek1EY3pPVFF4V2hjTk16UXdNekV6TURjek9UUXhXakFvTVJJd0VBWURWUVFEREFsdGFYUnRjSEp2CmVIa3hFakFRQmdOVkJBb01DVzFwZEcxd2NtOTRlVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTWJXVnRTKzM5aVd2RFc4bWFVVFhSYi82WlJvN1ZZZ3J2RVpwTFRSOFRIUUpUS3FGT0s0MFVzMgpzL0NZZUVqa2F4WWZFVGZJVGc4YURKVGJGaVpSZElGUklSM1Y5NWJsazV0bVFCb2kzaTVNb0Jhd05sTzgxL0V2CkZRZUZoZlkvdWJja0R4K1ZDRUxNVy9pSEFPUFRpYjRkbE94Y0gxQ3lEVi80TEVnc3JIL2tXN2hJWVo1bXRMblMKcFdHb1FqVlUvVW9tUnNaOXBNb2g1Y1pvbCtlZFNnY1ZlNzdOUiswYWwrdWYzYWpvYnkrcnZZSGdnbjg4bnYrKwpENGpUa01DVUkwMy81KzVxelhaUzR5VDNQbHd1SzA0WEZjMTVDTVJQY3gza3p0ekVYcHIxSDhXMm44QndCMzNiCnYxTnN5WXRGbnNXcFZmVFZKTG5zQWE1NDNTNHZzck1DQXdFQUFhTlhNRlV3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0hRWURWUjBPQkJZRQpGRyt0NGp4NGovWjQyWjNGZW9zdHpOOWk5NFdjTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDcGZLRlkyRnJOClFoMEo4cENSanE5UGdOekh3N3YzbWpnQTdINXlRWEpDK0VTdmkrMmJiT2NkNTNjdGo1TlpkN3ZjcWVmT3JtRS8KWXNvcElHa0l5dDdqdzV3d1FSNDFDVEhRbjRQTXA5akEySkthWnZqT1I0US9lUGlZSGgxSUozSklUQ25KYVNBRQpIbURTbjVybXlMZlVNSHFKVTdSKzFabDUyNUF4Vk9YeEdDMUZteElTWWhqNG1IcXVwRlBtMm15N3NOYWY2Nm03CjQreW5LZ2ttS3FyS3htZ0pFZk5zK1FPanpGWFlKTzBxczFwNXRJOUszYXF5QnBnVzU5cEJHa0dRMkVLMWduSXMKdlA1K3JjUUV0dTcrcWJGT08yYnByamJlZ1VGbkxuSXlzem5SOHRLdThzWU9vL2pGOFdZNVJsQ1pqQnNvNGxlQgo0MUxLUS9lTU84MjQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + } + + # http_proxy_config { + # http_proxy = "http://${azurerm_container_group.aci-mitmproxy.ip_address}:8080/" + # https_proxy = "https://${azurerm_container_group.aci-mitmproxy.ip_address}:8080/" + # no_proxy = ["localhost", "127.0.0.1"] #, azurerm_subnet.snet-aks.address_prefixes[0]] + # trusted_ca = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUROVENDQWgyZ0F3SUJBZ0lVR0greHNoSzVYOUZaMDR1WVk0WWZSU0tTdS93d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tERVNNQkFHQTFVRUF3d0piV2wwYlhCeWIzaDVNUkl3RUFZRFZRUUtEQWx0YVhSdGNISnZlSGt3SGhjTgpNalF3TXpFeU1UWXlPVEkxV2hjTk16UXdNekV5TVRZeU9USTFXakFvTVJJd0VBWURWUVFEREFsdGFYUnRjSEp2CmVIa3hFakFRQmdOVkJBb01DVzFwZEcxd2NtOTRlVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBS2hwUE0xMHJ5aGFWQzVDVllNeDdETFlEV2Y2TTcvSDVkQXdmWFlEQ0JWbm4zOFhFbVV6ZGp3NApKRzhjczRJRHBPUFlBY2pCazBscVpWZWd5UkYraDByNk5zcjQ1NENTejRqb2YvcWJKTHAwSkhDWEhmTCtNbDFPCkNEL3ZBcHVoTHRSYlIvdXp1cVU5MnJWOWpNMUExVDRyaVhVQ0xMcmNHMVFOakhMcVRGSkxwR3l3NDdnOGxXUlYKVGcwSkpzK0ZFYXZibjBEQ3JvVDFpem1ZMmNYendQY3JDZHpDbUxpWVR0cVJYaldqZ2NtSWtuWEt6ZlIxVnJ4Vwo1WFNidTVyMExCRzYwQzZxeEtQZlNqQ3EvQm5sTjVMNW8xRlBOekR4NEVCelJvbks4VjA4ZzhqNlRqQUpTakxJClN6VVRYUjMrV1cxR2FHRTdvcmJ0OHdwNGYvbzBPSGtDQXdFQUFhTlhNRlV3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0hRWURWUjBPQkJZRQpGS3c5akdTVS95dlV3cTllaURuSnZ6eXJVOXpFTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDTUJIU3U0QmlLCkhsdzlzbkV6ejQrTXl2RzdUVzBmdXRyNE5SZ0RyOTZieVBtRXlkWFlLUE85ZlVkQUI2S1J5QTlYWlNMQW4vWlUKWWFsSHlIMzU0NHY3WG1MRG11ZjhPWm8vMjdXdm9WVytGYWFxWnoybldsR1NsbW5XMTZ3SlpMUUpCSSs0U0NsRApTMmxkTnhmOHJFMDh1K2xNY0ZvZmphRG1TbERLNHQ2RXovQ3RmdEcxTWtUUk81N0JhbDlCY0t5RjIzV3ljRXVyCjFHVWt0N29JYWJHaXpkSW84RXFzbnNJSnJyTTRUS1A0NFVMei9aczlpQzUvWUVCUVNrZTg4T3RTc21TQjM5NHIKMEltU2dDOFVJMFB1UzF1YTI2MnNtMUI1dE11Yml6bUVFY3lTQ1pEUDRYTWhCdjBzSU10eldNaGFDazVNY3FvZAoxVXd4ZjRYSEEyU3kKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + # } + + # oms_agent { + # log_analytics_workspace_id = azurerm_log_analytics_workspace.workspace.id + # msi_auth_for_monitoring_enabled = true + # } + + # monitor_metrics { + # annotations_allowed = null + # labels_allowed = null + # } + + lifecycle { + ignore_changes = [ + default_node_pool.0.upgrade_settings, + ] + } +} + +resource "terraform_data" "aks-get-credentials" { + triggers_replace = [ + azurerm_kubernetes_cluster.aks.id + ] + + provisioner "local-exec" { + command = "az aks get-credentials --resource-group ${azurerm_kubernetes_cluster.aks.resource_group_name} --name ${azurerm_kubernetes_cluster.aks.name} --overwrite-existing" + } +} diff --git a/_egress_proxy/certificate/cert.crt b/_egress_proxy/certificate/cert.crt new file mode 100644 index 0000000..d4e5652 --- /dev/null +++ b/_egress_proxy/certificate/cert.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIUWaI/Mxw392v2DN8tIL0/mpdmTXQwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNDAzMTUxMjU2MjRaFw0yNDA0 +MTQxMjU2MjRaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCEtJyn4u/4EQIHDSdP6+yJzxxc04bxBe4Fx9JKXcEG +mlYLy5ltMtK1ZIR2CKnRyyczyHhodIh8Gf6Z/OikYJMtjkQS7RqvOhvKDIvipt32 +FLvOQ2Kwga1quA6+Tzi13HvWMz096dsTajMZQu+HdBG65sYjLMvTav1EPdD9z0QM +hsLRDKLstYF3jcrC1e9SRz4nNJBgKhCpCGBCyED87TiTB+hXoILNWdjda5w9rE3k +rGII+il904KO/pGotWGYZ8zbTnhi7Mig8Q7tXV7MwJLuFSxKCj9CA+Gx64XCX154 +jIE5Uq6n+/h3eKAkj08ofY3nEUMxzD5OtBWlwZscgVCFAgMBAAGjUzBRMB0GA1Ud +DgQWBBR9HJnnahCLUn4xr/k4gXOKJfdtMzAfBgNVHSMEGDAWgBR9HJnnahCLUn4x +r/k4gXOKJfdtMzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAe +OWKSnZgDXETHlvuyqaqmH2Qcn/WGq+dhzQjc4vRH5FA6nDpPtC6+BvlW+5/wprXf +hg0HFemgYjTjrtPDUzaBR/Z3hLCkk0gY9cKlk9+e2YhOZqvLa/2XFnE/E5DZEuwF +LtwGz9PQzB5O/Q8PieBsMocrkLNYpem4k21AeOkX/EUglnePb4N3lVuWuz6ehzJA 
+1v5nPZGATU9jXNszYtLtg0fXaZY1nN9EUCMCQ15lMRJcWl13MxxK7ueKJVweIMJz +TzsBzqV2G8tSOi6Pq6784M9/4nQEOmNFbiJD42VnFr1Kw+/RaJpzDraY3COT1PCz +WhfrRMcsNED+BLp0Cy8E +-----END CERTIFICATE----- diff --git a/_egress_proxy/certificate/cert.key b/_egress_proxy/certificate/cert.key new file mode 100644 index 0000000..2799774 --- /dev/null +++ b/_egress_proxy/certificate/cert.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQCEtJyn4u/4EQIH +DSdP6+yJzxxc04bxBe4Fx9JKXcEGmlYLy5ltMtK1ZIR2CKnRyyczyHhodIh8Gf6Z +/OikYJMtjkQS7RqvOhvKDIvipt32FLvOQ2Kwga1quA6+Tzi13HvWMz096dsTajMZ +Qu+HdBG65sYjLMvTav1EPdD9z0QMhsLRDKLstYF3jcrC1e9SRz4nNJBgKhCpCGBC +yED87TiTB+hXoILNWdjda5w9rE3krGII+il904KO/pGotWGYZ8zbTnhi7Mig8Q7t +XV7MwJLuFSxKCj9CA+Gx64XCX154jIE5Uq6n+/h3eKAkj08ofY3nEUMxzD5OtBWl +wZscgVCFAgMBAAECgf8Iju9DxdIp09BA3Hwk4R8GhsLz57BoWuKBbZJDms1k2n/g +M4E5P2dw/o0bvbXZ2YVUG9moFhlQ3VxOHEaIfEuQ+Qnq3KJKAb+a+N0hH5y7NUQQ +XdJogFrfZccSwX7zl7zp7SF5LBQbQ3j4zn6zfaJppjXeemPTvB91d9kKR+q3uwpb +KRrwKx7yhy+aNzRjlXWgK3zLZiFb3PL8pjEWp2A9WE80tQGkmxaEtNrrXu0aBi95 +JXXC/7t7gOpvAkMjdBiElxrFEz2Jr3LhyRa8/xRGF0CZLxb9UZwZ02dpeC26YDfO +GVgni3WOPpK7ioZ32skzMn4Ho/qHLykkEWq/3FECgYEAu31fs/PgTDXjupm20nAx +OvQNLl4kIbu3dHxek1L4KjUS3KQuyhg9zYf49LPzaaKzyGzJT4sjX+7L6cSieGRD +9JtYajpIWTfCNViz3fL0FMdqb+AAIO6Nbtz1iW30exCnFnzIyKGx3PBCShLNn6bs +c+1DsAoRCkZtT76KacdgSpUCgYEAtTJ6NYr+xzsDUyNcGJ/mbvkEcjiA+VmmFBcR +eKuTnIBduYHhe3WAWgm2DLMHuqoCaeSTQ0Mt0ueWYsdRJxzyeGbxNbJ+If2jqT6K +A6kvTcUip83MlsB9b+QR+eIWX1X+wMdwelxcMTreIHdAtgeJv/iDa/iMta6b6zFa +RMDuYjECgYEAuaZ1o3zzNsON0fHvZAUP2m5atvUlFfoIuGGGTJ81eKXBHZW9dwP1 +/pSLYdLmTk17dBS0af0+c/nDFKFOt6Og3o8MR3OavC1IMwa4ZCf0pLapoEnQFsvg +ZEyLHSAxm8JrkQrSzke+FSYanbpsvY/ORyRDiAcPxHrkNrhX2lI/+NkCgYAqxNBd +xQIgKoi9XfJGCbANb4+iGj4vHP77bPp9vhnobdAxkjuTtYdnOTWUR8nCQJCzR/WO +gdPWHT288Qjxr3539uxmXUwyX7j6oL1Y4d09gROOAiCRULwK5g1sKvZW6GhqPmkJ +KLXGFPwLM7q9fIgCHPmASbmExMMev5Zr9hIOcQKBgHvsheeZv7nS//Vvzwstvy5v +SEAPx4zBtdITHM18BPAF8WI7f8TD4Tutlwq7caSdDHVcoSd+KsLJcR1SIJV+aqfh +w1qWOyK2zvs8gpBWP124H7Rv9D/aMua7DwyBPG14IMk2IpMV+NSCbVqHJqbK8kJK +TQpFmmyywRTsC2fxSKyS +-----END PRIVATE KEY----- diff --git a/_egress_proxy/certificate/cert.pem b/_egress_proxy/certificate/cert.pem new file mode 100644 index 0000000..b0fd505 --- /dev/null +++ b/_egress_proxy/certificate/cert.pem @@ -0,0 +1,49 @@ +-----BEGIN PRIVATE KEY----- +MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQCEtJyn4u/4EQIH +DSdP6+yJzxxc04bxBe4Fx9JKXcEGmlYLy5ltMtK1ZIR2CKnRyyczyHhodIh8Gf6Z +/OikYJMtjkQS7RqvOhvKDIvipt32FLvOQ2Kwga1quA6+Tzi13HvWMz096dsTajMZ +Qu+HdBG65sYjLMvTav1EPdD9z0QMhsLRDKLstYF3jcrC1e9SRz4nNJBgKhCpCGBC +yED87TiTB+hXoILNWdjda5w9rE3krGII+il904KO/pGotWGYZ8zbTnhi7Mig8Q7t +XV7MwJLuFSxKCj9CA+Gx64XCX154jIE5Uq6n+/h3eKAkj08ofY3nEUMxzD5OtBWl +wZscgVCFAgMBAAECgf8Iju9DxdIp09BA3Hwk4R8GhsLz57BoWuKBbZJDms1k2n/g +M4E5P2dw/o0bvbXZ2YVUG9moFhlQ3VxOHEaIfEuQ+Qnq3KJKAb+a+N0hH5y7NUQQ +XdJogFrfZccSwX7zl7zp7SF5LBQbQ3j4zn6zfaJppjXeemPTvB91d9kKR+q3uwpb +KRrwKx7yhy+aNzRjlXWgK3zLZiFb3PL8pjEWp2A9WE80tQGkmxaEtNrrXu0aBi95 +JXXC/7t7gOpvAkMjdBiElxrFEz2Jr3LhyRa8/xRGF0CZLxb9UZwZ02dpeC26YDfO +GVgni3WOPpK7ioZ32skzMn4Ho/qHLykkEWq/3FECgYEAu31fs/PgTDXjupm20nAx +OvQNLl4kIbu3dHxek1L4KjUS3KQuyhg9zYf49LPzaaKzyGzJT4sjX+7L6cSieGRD +9JtYajpIWTfCNViz3fL0FMdqb+AAIO6Nbtz1iW30exCnFnzIyKGx3PBCShLNn6bs +c+1DsAoRCkZtT76KacdgSpUCgYEAtTJ6NYr+xzsDUyNcGJ/mbvkEcjiA+VmmFBcR +eKuTnIBduYHhe3WAWgm2DLMHuqoCaeSTQ0Mt0ueWYsdRJxzyeGbxNbJ+If2jqT6K +A6kvTcUip83MlsB9b+QR+eIWX1X+wMdwelxcMTreIHdAtgeJv/iDa/iMta6b6zFa +RMDuYjECgYEAuaZ1o3zzNsON0fHvZAUP2m5atvUlFfoIuGGGTJ81eKXBHZW9dwP1 
+/pSLYdLmTk17dBS0af0+c/nDFKFOt6Og3o8MR3OavC1IMwa4ZCf0pLapoEnQFsvg +ZEyLHSAxm8JrkQrSzke+FSYanbpsvY/ORyRDiAcPxHrkNrhX2lI/+NkCgYAqxNBd +xQIgKoi9XfJGCbANb4+iGj4vHP77bPp9vhnobdAxkjuTtYdnOTWUR8nCQJCzR/WO +gdPWHT288Qjxr3539uxmXUwyX7j6oL1Y4d09gROOAiCRULwK5g1sKvZW6GhqPmkJ +KLXGFPwLM7q9fIgCHPmASbmExMMev5Zr9hIOcQKBgHvsheeZv7nS//Vvzwstvy5v +SEAPx4zBtdITHM18BPAF8WI7f8TD4Tutlwq7caSdDHVcoSd+KsLJcR1SIJV+aqfh +w1qWOyK2zvs8gpBWP124H7Rv9D/aMua7DwyBPG14IMk2IpMV+NSCbVqHJqbK8kJK +TQpFmmyywRTsC2fxSKyS +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIUWaI/Mxw392v2DN8tIL0/mpdmTXQwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNDAzMTUxMjU2MjRaFw0yNDA0 +MTQxMjU2MjRaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCEtJyn4u/4EQIHDSdP6+yJzxxc04bxBe4Fx9JKXcEG +mlYLy5ltMtK1ZIR2CKnRyyczyHhodIh8Gf6Z/OikYJMtjkQS7RqvOhvKDIvipt32 +FLvOQ2Kwga1quA6+Tzi13HvWMz096dsTajMZQu+HdBG65sYjLMvTav1EPdD9z0QM +hsLRDKLstYF3jcrC1e9SRz4nNJBgKhCpCGBCyED87TiTB+hXoILNWdjda5w9rE3k +rGII+il904KO/pGotWGYZ8zbTnhi7Mig8Q7tXV7MwJLuFSxKCj9CA+Gx64XCX154 +jIE5Uq6n+/h3eKAkj08ofY3nEUMxzD5OtBWlwZscgVCFAgMBAAGjUzBRMB0GA1Ud +DgQWBBR9HJnnahCLUn4xr/k4gXOKJfdtMzAfBgNVHSMEGDAWgBR9HJnnahCLUn4x +r/k4gXOKJfdtMzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAe +OWKSnZgDXETHlvuyqaqmH2Qcn/WGq+dhzQjc4vRH5FA6nDpPtC6+BvlW+5/wprXf +hg0HFemgYjTjrtPDUzaBR/Z3hLCkk0gY9cKlk9+e2YhOZqvLa/2XFnE/E5DZEuwF +LtwGz9PQzB5O/Q8PieBsMocrkLNYpem4k21AeOkX/EUglnePb4N3lVuWuz6ehzJA +1v5nPZGATU9jXNszYtLtg0fXaZY1nN9EUCMCQ15lMRJcWl13MxxK7ueKJVweIMJz +TzsBzqV2G8tSOi6Pq6784M9/4nQEOmNFbiJD42VnFr1Kw+/RaJpzDraY3COT1PCz +WhfrRMcsNED+BLp0Cy8E +-----END CERTIFICATE----- diff --git a/_egress_proxy/certificate/generate-cert.sh b/_egress_proxy/certificate/generate-cert.sh new file mode 100644 index 0000000..bf5e191 --- /dev/null +++ b/_egress_proxy/certificate/generate-cert.sh @@ -0,0 +1,4 @@ +openssl genrsa -out cert.key 2048 +# (Specify the mitm domain as Common Name, e.g. 
\*.google.com) +openssl req -new -x509 -key cert.key -out cert.crt +cat cert.key cert.crt > cert.pem diff --git a/_egress_proxy/install-mitmproxy.sh b/_egress_proxy/install-mitmproxy.sh new file mode 100644 index 0000000..1f4fc0c --- /dev/null +++ b/_egress_proxy/install-mitmproxy.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# sudo apt update -y + +# wget https://downloads.mitmproxy.org/10.2.2/mitmproxy-10.2.2-linux-x86_64.tar.gz + +# tar -xvf mitmproxy-10.2.2-linux-x86_64.tar.gz + +# # start the proxy; this is also needed to generate the certificates + +# ./mitmproxy + +sudo apt update -y + +sudo apt install python3-pip -y + +pip3 install mitmproxy + +# mitmproxy --listen-port 8080 --web-host 0.0.0.0 --web-port 8081 --set block_global=false + +mitmweb --listen-port 8080 --web-host 0.0.0.0 --web-port 8081 --certs *=cert.pem --set block_global=false + +# screen -d -m mitmweb --listen-port 8080 --web-host 0.0.0.0 --web-port 8081 --set block_global=false + +# install the cert in: mitm.it \ No newline at end of file diff --git a/_egress_proxy/mitmproxy-ca-cert (22).p12 b/_egress_proxy/mitmproxy-ca-cert (22).p12 new file mode 100644 index 0000000..5da363b Binary files /dev/null and b/_egress_proxy/mitmproxy-ca-cert (22).p12 differ diff --git a/_egress_proxy/output.tf b/_egress_proxy/output.tf new file mode 100644 index 0000000..e69de29 diff --git a/_egress_proxy/providers.tf b/_egress_proxy/providers.tf new file mode 100644 index 0000000..ab397d4 --- /dev/null +++ b/_egress_proxy/providers.tf @@ -0,0 +1,19 @@ +terraform { + + required_version = ">= 1.7" + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = ">= 3.96.0" + } + } +} + +provider "azurerm" { + features { + resource_group { + prevent_deletion_if_contains_resources = false + } + } +} diff --git a/_egress_proxy/rg.tf b/_egress_proxy/rg.tf new file mode 100644 index 0000000..713cf37 --- /dev/null +++ b/_egress_proxy/rg.tf @@ -0,0 +1,4 @@ +resource "azurerm_resource_group" "rg" { + name = "rg-aks-proxy-${var.prefix}" + location = "westeurope" +} \ No newline at end of file diff --git a/_egress_proxy/variables.tf b/_egress_proxy/variables.tf new file mode 100644 index 0000000..dfd32b7 --- /dev/null +++ b/_egress_proxy/variables.tf @@ -0,0 +1,5 @@ +variable "prefix" { + description = "Prefix for resources" + type = string + default = "67" +} \ No newline at end of file diff --git a/_egress_proxy/vm-linux-proxy-mitm.tf b/_egress_proxy/vm-linux-proxy-mitm.tf new file mode 100644 index 0000000..7dffe4b --- /dev/null +++ b/_egress_proxy/vm-linux-proxy-mitm.tf @@ -0,0 +1,62 @@ +resource "azurerm_public_ip" "pip-vm-proxy" { + name = "pip-vm-proxy" + location = azurerm_resource_group.rg.location + resource_group_name = azurerm_resource_group.rg.name + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_network_interface" "nic-vm-proxy" { + name = "nic-vm-proxy" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + + enable_ip_forwarding = true + + ip_configuration { + name = "internal" + subnet_id = azurerm_subnet.subnet-vm.id + private_ip_address_allocation = "Dynamic" + public_ip_address_id = azurerm_public_ip.pip-vm-proxy.id + } +} + +resource "azurerm_linux_virtual_machine" "vm-proxy" { + name = "vm-linux-mitmproxy" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + size = "Standard_B2ats_v2" + disable_password_authentication = false + admin_username = "azureuser" + admin_password = "@Aa123456789" + 
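# NOTE: hardcoded demo credentials; outside a lab, prefer admin_ssh_key with disable_password_authentication = true + 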
network_interface_ids = [azurerm_network_interface.nic-vm-proxy.id] + priority = "Spot" + eviction_policy = "Deallocate" + + custom_data = filebase64("./install-mitmproxy.sh") + + os_disk { + name = "os-disk-vm" + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + + source_image_reference { + publisher = "canonical" + offer = "0001-com-ubuntu-server-jammy" + sku = "22_04-lts-gen2" + version = "latest" + } + + boot_diagnostics { + storage_account_uri = null + } +} + +output "vm_public_ip" { + value = azurerm_public_ip.pip-vm-proxy.ip_address +} + +output "vm_private_ip" { + value = azurerm_network_interface.nic-vm-proxy.private_ip_address +} \ No newline at end of file diff --git a/_egress_proxy/vnet-hub.tf b/_egress_proxy/vnet-hub.tf new file mode 100644 index 0000000..7633566 --- /dev/null +++ b/_egress_proxy/vnet-hub.tf @@ -0,0 +1,14 @@ +resource "azurerm_virtual_network" "vnet-hub" { + name = "vnet-hub-weu" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + address_space = ["10.0.0.0/16"] + dns_servers = null +} + +resource "azurerm_subnet" "subnet-vm" { + name = "subnet-vm" + resource_group_name = azurerm_virtual_network.vnet-hub.resource_group_name + virtual_network_name = azurerm_virtual_network.vnet-hub.name + address_prefixes = ["10.0.0.0/24"] +} \ No newline at end of file diff --git a/_egress_proxy/vnet-spoke.tf b/_egress_proxy/vnet-spoke.tf new file mode 100644 index 0000000..f2bb548 --- /dev/null +++ b/_egress_proxy/vnet-spoke.tf @@ -0,0 +1,13 @@ +resource "azurerm_virtual_network" "vnet" { + name = "vnet-aks" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + address_space = ["10.10.0.0/16"] +} + +resource "azurerm_subnet" "snet-aks" { + name = "snet-aks" + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = azurerm_virtual_network.vnet.resource_group_name + address_prefixes = ["10.10.0.0/24"] +} \ No newline at end of file diff --git a/tmp/main.tf b/tmp/main.tf index eac9b69..a93747f 100644 --- a/tmp/main.tf +++ b/tmp/main.tf @@ -1,660 +1,156 @@ -resource "azurerm_resource_group" "rg" { - location = var.resource_group_location - name = "defaultPrometheusOnboardingResourceGroup" -} - -resource "azurerm_kubernetes_cluster" "k8s" { - location = azurerm_resource_group.rg.location - name = var.cluster_name - resource_group_name = azurerm_resource_group.rg.name - - - dns_prefix = var.dns_prefix - tags = { - Environment = "Development" - } data "azurerm_client_config" "current" {} - default_node_pool { - name = "agentpool" - vm_size = "Standard_D2_v2" - node_count = var.agent_count - } +resource "azurerm_resource_group" "example" { + name = "example" + location = "West Europe" +} - monitor_metrics { - annotations_allowed = var.metric_annotations_allowlist - labels_allowed = var.metric_labels_allowlist - } +resource "azurerm_resource_group" "snap" { + name = "example-snap" + location = "West Europe" +} - network_profile { - network_plugin = "kubenet" - load_balancer_sku = "standard" - } +resource "azurerm_data_protection_backup_vault" "example" { + name = "example" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" identity { type = "SystemAssigned" } } -resource "azurerm_monitor_workspace" "amw" { - name = var.monitor_workspace_name - resource_group_name = azurerm_resource_group.rg.name - location = 
azurerm_resource_group.rg.location -} - -resource "azurerm_monitor_data_collection_endpoint" "dce" { - name = "MSProm-${azurerm_resource_group.rg.location}-${var.cluster_name}" - resource_group_name = azurerm_resource_group.rg.name - location = azurerm_resource_group.rg.location - kind = "Linux" -} - -resource "azurerm_monitor_data_collection_rule" "dcr" { - name = "MSProm-${azurerm_resource_group.rg.location}-${var.cluster_name}" - resource_group_name = azurerm_resource_group.rg.name - location = azurerm_resource_group.rg.location - data_collection_endpoint_id = azurerm_monitor_data_collection_endpoint.dce.id - kind = "Linux" +resource "azurerm_kubernetes_cluster" "example" { + name = "example" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + dns_prefix = "dns" - destinations { - monitor_account { - monitor_account_id = azurerm_monitor_workspace.amw.id - name = "MonitoringAccount1" - } - } - - data_flow { - streams = ["Microsoft-PrometheusMetrics"] - destinations = ["MonitoringAccount1"] - } - - - data_sources { - prometheus_forwarder { - streams = ["Microsoft-PrometheusMetrics"] - name = "PrometheusDataSource" - } + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + # enable_host_encryption = true } - description = "DCR for Azure Monitor Metrics Profile (Managed Prometheus)" - depends_on = [ - azurerm_monitor_data_collection_endpoint.dce - ] -} - -resource "azurerm_monitor_data_collection_rule_association" "dcra" { - name = "MSProm-${azurerm_resource_group.rg.location}-${var.cluster_name}" - target_resource_id = azurerm_kubernetes_cluster.k8s.id - data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr.id - description = "Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster." - depends_on = [ - azurerm_monitor_data_collection_rule.dcr - ] -} - -resource "azurerm_dashboard_grafana" "grafana" { - name = var.grafana_name - resource_group_name = azurerm_resource_group.rg.name - location = var.grafana_location - identity { type = "SystemAssigned" } - - azure_monitor_workspace_integrations { - resource_id = azurerm_monitor_workspace.amw.id - } } -resource "azurerm_role_assignment" "datareaderrole" { - scope = azurerm_monitor_workspace.amw.id - role_definition_id = "/subscriptions/${split("/", azurerm_monitor_workspace.amw.id)[2]}/providers/Microsoft.Authorization/roleDefinitions/b0d8363b-8ddd-447d-831f-62ca05bff136" - principal_id = azurerm_dashboard_grafana.grafana.identity.0.principal_id +resource "azurerm_kubernetes_cluster_trusted_access_role_binding" "aks_cluster_trusted_access" { + kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id + name = "example" + roles = ["Microsoft.DataProtection/backupVaults/backup-operator"] + source_resource_id = azurerm_data_protection_backup_vault.example.id } -resource "azurerm_monitor_alert_prometheus_rule_group" "node_recording_rules_rule_group" { - name = "NodeRecordingRulesRuleGroup-${var.cluster_name}" - location = azurerm_resource_group.rg.location - resource_group_name = azurerm_resource_group.rg.name - cluster_name = var.cluster_name - description = "Node Recording Rules Rule Group" - rule_group_enabled = true - interval = "PT1M" - scopes = [azurerm_monitor_workspace.amw.id,azurerm_kubernetes_cluster.k8s.id] - - rule { - enabled = true - record = "instance:node_num_cpu:sum" - expression = <