Commit b072deb: proxy
HoussemDellai committed Mar 15, 2024
1 parent 2429021
Showing 47 changed files with 767 additions and 630 deletions.
Binary file modified .infracost/pricing.gob
9 changes: 5 additions & 4 deletions 57_filter_egress_traffic_fqdn/commands.ps1
@@ -11,22 +11,23 @@ az aks create -n $AKS_NAME -g $RG_NAME --network-plugin none
az aks get-credentials -n $AKS_NAME -g $RG_NAME --overwrite-existing

helm repo add cilium https://helm.cilium.io/
+helm repo update

-helm upgrade --install cilium cilium/cilium --version 1.14.2 `
+helm upgrade --install cilium cilium/cilium --version 1.15.1 `
--namespace kube-system `
--set aksbyocni.enabled=true `
--set nodeinit.enabled=true `
--set sctp.enabled=true `
--set hubble.enabled=true `
--set hubble.metrics.enabled="{dns,drop,tcp,flow,icmp,http}" `
--set hubble.metrics.enabled="{dns,drop,tcp,flow,icmp,http,port-distribution,labelsContext=source_ip\,source_namespace\,source_workload\,destination_ip\,destination_namespace\,destination_workload\,traffic_direction}" `
--set hubble.relay.enabled=true `
--set hubble.ui.enabled=true `
--set hubble.ui.service.type=NodePort `
--set hubble.relay.service.type=NodePort
# --set gatewayAPI.enabled=true

# Restart unmanaged Pods (required by new Cilium install)
-kubectl delete pods -A -all
+kubectl delete pods -A --all
# kubectl get pods --all-namespaces -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true | grep '<none>' | awk '{print "-n "$1" "$2}' | xargs -L 1 -r kubectl delete pod

# make sure cilium CLI is installed on your machine (https://github.com/cilium/cilium-cli/releases/tag/v0.15.0)
@@ -39,7 +40,7 @@ cilium status --wait
cilium connectivity test

# deploy sample online service, just to get public IP
$FQDN=(az container create -g $RG_NAME -n aci-app --image nginx:latest --ports 80 --ip-address public --dns-name-label aci-app-931 --query ipAddress.fqdn --output tsv)
$FQDN
# aci-app-931.westeurope.azurecontainer.io

40 changes: 40 additions & 0 deletions 80_aks_backup_tf/Readme.md
@@ -0,0 +1,40 @@
# AKS Backup using Azure Backup and Terraform

## Introduction

With AKS, you can use `Azure Backup` to back up and restore your cluster resources and persistent volumes.
Backup relies on the `Microsoft.DataProtection.Kubernetes` cluster extension, a `Backup vault`, and a Trusted Access role binding between the vault and the cluster.

This lab provides a Terraform implementation for backing up an AKS cluster.

## Architecture

![](images/architecture.png)

## Deploying the resources using Terraform

To deploy the Terraform configuration files, run the following commands:

```sh
terraform init

terraform plan -out tfplan

terraform apply tfplan
```

The following resources will be created.

![](images/resources.png)
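
Once the deployment completes, you can verify that the backup instance was registered in the vault. A minimal check using the Azure CLI, assuming the default values in `variables.tf` and that the `dataprotection` CLI extension is installed:

```sh
# List the backup instances in the Backup vault
# (resource group and vault names assume the Terraform defaults)
az dataprotection backup-instance list \
    --resource-group rg-akscluster-80 \
    --vault-name backup-vault \
    --output table
```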

## Cleanup resources

To delete the created resources, run the following command:

```sh
terraform destroy
```

## More reading

https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/azure-monitor-workspace-manage?tabs=azure-portal
22 changes: 22 additions & 0 deletions 80_aks_backup_tf/aks-backup-extenstion.tf
@@ -0,0 +1,22 @@
resource "azurerm_kubernetes_cluster_extension" "extension" {
name = "backup-extension"
cluster_id = azurerm_kubernetes_cluster.aks.id
extension_type = "Microsoft.DataProtection.Kubernetes"
release_train = "stable"
release_namespace = "dataprotection-microsoft"
configuration_settings = {
"configuration.backupStorageLocation.bucket" = azurerm_storage_container.container.name
"configuration.backupStorageLocation.config.resourceGroup" = azurerm_storage_account.storage.resource_group_name
"configuration.backupStorageLocation.config.storageAccount" = azurerm_storage_account.storage.name
"configuration.backupStorageLocation.config.subscriptionId" = data.azurerm_client_config.current.subscription_id
"credentials.tenantId" = data.azurerm_client_config.current.tenant_id
}
}

resource "azurerm_role_assignment" "extension_and_storage_account_permission" {
scope = azurerm_storage_account.storage.id
role_definition_name = "Storage Account Contributor"
principal_id = azurerm_kubernetes_cluster_extension.extension.aks_assigned_identity[0].principal_id
}

data "azurerm_client_config" "current" {}
29 changes: 29 additions & 0 deletions 80_aks_backup_tf/aks.tf
@@ -0,0 +1,29 @@
resource "azurerm_kubernetes_cluster" "aks" {
name = "aks-cluster"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
dns_prefix = "aks"
kubernetes_version = "1.29.0"

network_profile {
network_plugin = "azure"
network_plugin_mode = "overlay"
ebpf_data_plane = "cilium"
}

default_node_pool {
name = "systempool"
node_count = 3
vm_size = "standard_b2als_v2"
}

identity {
type = "SystemAssigned"
}
}

resource "azurerm_role_assignment" "cluster_msi_contributor_on_snap_rg" {
scope = azurerm_resource_group.rg-backup.id
role_definition_name = "Contributor"
principal_id = azurerm_kubernetes_cluster.aks.identity[0].principal_id
}
22 changes: 22 additions & 0 deletions 80_aks_backup_tf/backup_instance.tf
@@ -0,0 +1,22 @@
resource "azurerm_data_protection_backup_instance_kubernetes_cluster" "backup-instance" {
name = "backup-instance"
location = azurerm_resource_group.rg.location
vault_id = azurerm_data_protection_backup_vault.backup-vault.id
kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id
snapshot_resource_group_name = azurerm_resource_group.rg-backup.name
backup_policy_id = azurerm_data_protection_backup_policy_kubernetes_cluster.backup-policy-aks.id

backup_datasource_parameters {
excluded_namespaces = ["test-excluded-namespaces"]
excluded_resource_types = ["exvolumesnapshotcontents.snapshot.storage.k8s.io"]
cluster_scoped_resources_enabled = true
included_namespaces = ["*"] # ["test-included-namespaces"]
included_resource_types = ["*"] # ["involumesnapshotcontents.snapshot.storage.k8s.io"]
label_selectors = ["*"] # ["kubernetes.io/metadata.name:test"]
volume_snapshot_enabled = true
}

depends_on = [
azurerm_role_assignment.extension_and_storage_account_permission,
]
}
31 changes: 31 additions & 0 deletions 80_aks_backup_tf/backup_policy.tf
@@ -0,0 +1,31 @@
resource "azurerm_data_protection_backup_policy_kubernetes_cluster" "backup-policy-aks" {
name = "backup-policy-aks"
resource_group_name = azurerm_data_protection_backup_vault.backup-vault.resource_group_name
vault_name = azurerm_data_protection_backup_vault.backup-vault.name

backup_repeating_time_intervals = ["R/2023-05-23T02:30:00+00:00/P1W"]

retention_rule {
name = "Daily"
priority = 25

life_cycle {
duration = "P84D"
data_store_type = "OperationalStore"
}

criteria {
days_of_week = ["Thursday"]
months_of_year = ["November"]
weeks_of_month = ["First"]
scheduled_backup_times = ["2023-05-23T02:30:00Z"]
}
}

default_retention_rule {
life_cycle {
duration = "P14D"
data_store_type = "OperationalStore"
}
}
}
23 changes: 23 additions & 0 deletions 80_aks_backup_tf/backup_vault.tf
@@ -0,0 +1,23 @@
resource "azurerm_data_protection_backup_vault" "backup-vault" {
name = "backup-vault"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
datastore_type = "VaultStore"
redundancy = "LocallyRedundant"

identity {
type = "SystemAssigned"
}
}

resource "azurerm_role_assignment" "vault_msi_read_on_cluster" {
scope = azurerm_kubernetes_cluster.aks.id
role_definition_name = "Reader"
principal_id = azurerm_data_protection_backup_vault.backup-vault.identity[0].principal_id
}

resource "azurerm_role_assignment" "vault_msi_read_on_snap_rg" {
scope = azurerm_resource_group.rg-backup.id
role_definition_name = "Reader"
principal_id = azurerm_data_protection_backup_vault.backup-vault.identity[0].principal_id
}
Empty file added 80_aks_backup_tf/output.tf
25 changes: 25 additions & 0 deletions 80_aks_backup_tf/providers.tf
@@ -0,0 +1,25 @@
terraform {

required_version = ">= 1.2.8"

required_providers {

azurerm = {
source = "hashicorp/azurerm"
version = "= 3.95.0"
}

time = {
source = "hashicorp/time"
version = "0.10.0"
}
}
}

provider "azurerm" {
features {}
}

provider "time" {
# Configuration options
}
9 changes: 9 additions & 0 deletions 80_aks_backup_tf/rg.tf
@@ -0,0 +1,9 @@
resource "azurerm_resource_group" "rg" {
name = "rg-akscluster-${var.prefix}"
location = var.location
}

resource "azurerm_resource_group" "rg-backup" {
name = "rg-aks-backup-${var.prefix}"
location = var.location
}
13 changes: 13 additions & 0 deletions 80_aks_backup_tf/storage_account.tf
@@ -0,0 +1,13 @@
resource "azurerm_storage_account" "storage" {
name = "storage19753"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
account_tier = "Standard"
account_replication_type = "LRS"
}

resource "azurerm_storage_container" "container" {
name = "backup-container"
storage_account_name = azurerm_storage_account.storage.name
container_access_type = "private"
}
6 changes: 6 additions & 0 deletions 80_aks_backup_tf/trusted_access.tf
@@ -0,0 +1,6 @@
resource "azurerm_kubernetes_cluster_trusted_access_role_binding" "aks_cluster_trusted_access" {
kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id
name = "trusted-access"
roles = ["Microsoft.DataProtection/backupVaults/backup-operator"]
source_resource_id = azurerm_data_protection_backup_vault.backup-vault.id
}
7 changes: 7 additions & 0 deletions 80_aks_backup_tf/variables.tf
@@ -0,0 +1,7 @@
variable "prefix" {
default = "80"
}

variable "location" {
default = "swedencentral"
}
Binary file added 85_prometheus_grafana/.infracost/pricing.gob
2 changes: 1 addition & 1 deletion 85_prometheus_grafana/providers.tf
@@ -6,7 +6,7 @@ terraform {

azurerm = {
source = "hashicorp/azurerm"
version = "= 3.94.0"
version = "= 3.95.0"
}

time = {
@@ -1 +1 @@
{"Path":"d:\\Projects\\docker-kubernetes-course\\85_prometheus_grafana","Version":"2.0","Modules":[]}
{"Path":"d:\\Projects\\docker-kubernetes-course\\88_prometheus_grafana_ampls","Version":"2.0","Modules":[]}
6 changes: 5 additions & 1 deletion 88_prometheus_grafana_ampls/grafana-mpe.tf
@@ -21,15 +21,19 @@ data "azapi_resource_list" "mpe-grafana" {
type = "Microsoft.Monitor/accounts/privateEndpointConnections@2023-04-03"
parent_id = azurerm_monitor_workspace.prometheus.id
response_export_values = ["*"]
+depends_on = [azapi_resource.mpe-grafana]
}

# Retrieve the Grafana's Managed Private Endpoint ID
locals {
-mpe-grafana-id = element([for pe in jsondecode(data.azapi_resource_list.mpe-grafana.output).value : pe.id if pe.properties.privateLinkServiceConnectionState.status == "Pending"], 0) # strcontains(pe.id, azapi_resource.mpe-grafana.name)], 0)
+# mpe-grafana-id = element([for pe in jsondecode(data.azapi_resource_list.mpe-grafana.output).value : pe.id if pe.properties.privateLinkServiceConnectionState.status == "Pending"], 0)
+# mpe-grafana-id = element([for pe in jsondecode(data.azapi_resource_list.mpe-grafana.output).value : pe.id if strcontains(pe.id, azapi_resource.mpe-grafana.name)], 0)
+mpe-grafana-id = try(element([for pe in jsondecode(data.azapi_resource_list.mpe-grafana.output).value : pe.id if pe.properties.privateLinkServiceConnectionState.status == "Pending"], 0), null)
}

# Approve Grafana's Managed Private Endpoint connection to Prometheus
resource "azapi_update_resource" "approve-mpe-grafana" {
+count = local.mpe-grafana-id != null ? 1 : 0
type = "Microsoft.Monitor/accounts/privateEndpointConnections@2023-04-03"
resource_id = local.mpe-grafana-id

18 changes: 13 additions & 5 deletions 88_prometheus_grafana_ampls/log_analytics-dcr.tf
@@ -3,6 +3,7 @@ resource "azurerm_monitor_data_collection_rule" "dcr-log-analytics" {
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
data_collection_endpoint_id = azurerm_monitor_data_collection_endpoint.dce-log-analytics.id
+depends_on = [time_sleep.wait_60_seconds]

destinations {
log_analytics {
@@ -18,10 +19,10 @@ resource "azurerm_monitor_data_collection_rule" "dcr-log-analytics" {

data_sources {
syslog {
name = "demo-syslog"
facility_names = [ "*" ]
log_levels = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency", ]
streams = [ "Microsoft-Syslog" ]
name = "demo-syslog"
facility_names = ["*"]
log_levels = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency", ]
streams = ["Microsoft-Syslog"]
}
extension {
extension_name = "ContainerInsights"
@@ -33,7 +34,7 @@
enableContainerLogV2 = true
interval = "1m"
namespaceFilteringMode = "Include" # "Exclude" "Off"
namespaces = ["kube-system", "default"]
namespaces = ["kube-system", "default"]
enableContainerLogV2 = true
}
}
@@ -42,6 +43,13 @@
}
}

+# DCR creation should start about 60 seconds after the Log Analytics workspace is created.
+# This is a workaround; it may become unnecessary in a future provider version.
+resource "time_sleep" "wait_60_seconds" {
+  create_duration = "60s"
+  depends_on      = [azurerm_log_analytics_workspace.workspace]
+}

resource "azurerm_monitor_data_collection_rule_association" "dcra-dcr-log-analytics-aks" {
name = "dcra-dcr-log-analytics-aks"
target_resource_id = azurerm_kubernetes_cluster.aks.id
2 changes: 1 addition & 1 deletion 88_prometheus_grafana_ampls/providers.tf
@@ -6,7 +6,7 @@ terraform {

azurerm = {
source = "hashicorp/azurerm"
version = "= 3.94.0"
version = "= 3.95.0"
}

azuread = {
12 changes: 12 additions & 0 deletions _egress_proxy/README.md
@@ -0,0 +1,12 @@
You can use your own (leaf) certificate by passing the `--certs [domain=]path_to_certificate` option to mitmproxy. Mitmproxy then uses the provided certificate for interception of the specified domain instead of generating a certificate signed by its own CA.

The certificate file is expected to be in the PEM format.

You can generate a certificate in this format using these instructions:

```sh
openssl genrsa -out cert.key 2048
# (Specify the mitm domain as Common Name, e.g. *.google.com)
openssl req -new -x509 -key cert.key -out cert.crt
cat cert.key cert.crt > cert.pem
```
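
For example, to intercept traffic for a single domain with the generated certificate (the domain pattern here is illustrative):

```sh
# Quote the pattern so the shell does not expand the wildcard
mitmproxy --certs '*.google.com=cert.pem'
```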
