feat: add complete example of infrastructure with aws #87

Merged · 4 commits · Dec 3, 2024
162 changes: 162 additions & 0 deletions infra-examples/aws/eks/eks.tf
@@ -0,0 +1,162 @@
locals {
  cluster_autoscaler_tags = var.enable_cluster_autoscaler ? {
    "k8s.io/cluster-autoscaler/${var.cluster_name}" = "owned"
    "k8s.io/cluster-autoscaler/enabled"             = "true"
  } : {}

  post_bootstrap_user_data = var.post_bootstrap_user_data != null ? var.post_bootstrap_user_data : templatefile(
    "${path.module}/templates/post_bootstrap_user_data.tpl",
    {
      registry_credentials = var.registry_credentials
    }
  )

  # Default additional IAM role policies for all node groups.
  # Every element must define:
  #   - key:    any string identifying the policy
  #   - value:  the ARN of the policy to attach
  #   - enable: whether the policy is attached or not
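  # For example (hypothetical, not part of this deployment), attaching the
  # AWS-managed SSM policy to every node group would look like:
  #   node_group_defaults_iam_role_additional_policies = [
  #     {
  #       key    = "ssm"
  #       value  = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
  #       enable = true
  #     }
  #   ]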
  node_group_defaults_iam_role_additional_policies = []
}

data "aws_ami" "latest_ubuntu_eks" {
most_recent = true
owners = ["099720109477"] # Canonical

filter {
name = "name"
values = ["ubuntu-eks/k8s_${var.cluster_version}/images/hvm-ssd/ubuntu-${var.ubuntu_version}-amd64-server-*"]
}
}

module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 20.24"
cluster_name = var.cluster_name
cluster_version = var.cluster_version
cluster_endpoint_public_access = true
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
enable_irsa = true
cluster_tags = var.cluster_tags
tags = var.tags

create_cloudwatch_log_group = false
cluster_enabled_log_types = []

cluster_addons = {
coredns = {
name = "coredns"
}
kube-proxy = {
name = "kube-proxy"
}
vpc-cni = {
name = "vpc-cni"
}
aws-ebs-csi-driver = {
name = "aws-ebs-csi-driver"
service_account_role_arn = module.ebs_csi_irsa_role.iam_role_arn
}
}

# The security group rules below should not conflict with the recommended rules defined
# here: https://github.com/terraform-aws-modules/terraform-aws-eks/blob/v19.21.0/node_groups.tf#L128
node_security_group_additional_rules = {
    ssh_access = {
      description = "Grant SSH access to the nodes"
      protocol    = "tcp"
      from_port   = 22
      to_port     = 22
      type        = "ingress"
      cidr_blocks = concat([module.vpc.vpc_cidr_block], var.extra_ssh_cidrs)
    }
  }

  # Disable secrets encryption
  cluster_encryption_config = {}

  iam_role_use_name_prefix = var.iam_role_use_name_prefix
  iam_role_name            = var.iam_role_name

  # Cluster security group settings
  cluster_security_group_description     = var.cluster_security_group_description
  cluster_security_group_use_name_prefix = var.cluster_security_group_use_name_prefix
  cluster_security_group_name            = var.cluster_security_group_name

  eks_managed_node_group_defaults = {
    iam_role_additional_policies = { for s in local.node_group_defaults_iam_role_additional_policies : s.key => s.value if s.enable }
  }

  eks_managed_node_groups = {
    ubuntu_worker = {
      ami_id     = var.ami_id != "" ? var.ami_id : data.aws_ami.latest_ubuntu_eks.id
      key_name   = var.key_name
      name       = var.node_group_name
      subnet_ids = coalesce(var.node_group_subnets, module.vpc.private_subnets)

      # This ensures the bootstrap user data is used to join the node.
      # By default, EKS managed node groups do not append the bootstrap script;
      # this adds it back in using the default template provided by the module.
      # Note: this assumes the AMI provided is an EKS-optimized AMI derivative.
      enable_bootstrap_user_data = true

      instance_types = var.instance_types
      max_size       = var.max_size
      min_size       = var.min_size
      desired_size   = var.desired_size
      capacity_type  = var.capacity_type

      create_security_group = false

      post_bootstrap_user_data = local.post_bootstrap_user_data

      block_device_mappings = {
        sda1 = {
          device_name = "/dev/sda1"
          ebs = {
            volume_size = var.disk_size
          }
        }
      }

      tags = merge(var.node_groups_tags, local.cluster_autoscaler_tags)
    }
  }
}

module "ebs_csi_irsa_role" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "~> 5.47"

role_name = "ebs-csi-controller-${var.cluster_name}"
attach_ebs_csi_policy = true
tags = var.tags

oidc_providers = {
main = {
provider_arn = module.eks.oidc_provider_arn
namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"]
}
}
}

# Role required by the cluster autoscaler
module "cluster_autoscaler_irsa_role" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
  version = "~> 5.47"

  count = var.enable_cluster_autoscaler ? 1 : 0

  role_name                        = "cluster-autoscaler-${var.cluster_name}"
  attach_cluster_autoscaler_policy = true
  cluster_autoscaler_cluster_ids   = [module.eks.cluster_name]
  tags                             = var.tags

  oidc_providers = {
    ex = {
      provider_arn               = module.eks.oidc_provider_arn
      namespace_service_accounts = ["kube-system:cluster-autoscaler"]
    }
  }
}
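
# Note (assumption, not created by this example): for IRSA to take effect, the
# cluster-autoscaler's Kubernetes service account must carry the matching
# annotation, e.g.:
#   eks.amazonaws.com/role-arn: arn:aws:iam::<account-id>:role/cluster-autoscaler-<cluster-name>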
100 changes: 100 additions & 0 deletions infra-examples/aws/eks/outputs.tf
@@ -0,0 +1,100 @@
output "vpc_id" {
description = "The ID of the VPC"
value = module.vpc.vpc_id
}

output "vpc_arn" {
description = "The ARN of the VPC"
value = module.vpc.vpc_arn
}

output "vpc_cidr_block" {
description = "The CIDR block of the VPC"
value = module.vpc.vpc_cidr_block
}

output "default_security_group_id" {
description = "The ID of the security group created by default on VPC creation"
value = module.vpc.default_security_group_id
}

output "vpc_ipv6_association_id" {
description = "The association ID for the IPv6 CIDR block"
value = module.vpc.vpc_ipv6_association_id
}

output "vpc_ipv6_cidr_block" {
description = "The IPv6 CIDR block"
value = module.vpc.vpc_ipv6_cidr_block
}

output "vpc_secondary_cidr_blocks" {
description = "List of secondary CIDR blocks of the VPC"
value = module.vpc.vpc_secondary_cidr_blocks
}

output "vpc_owner_id" {
description = "The ID of the AWS account that owns the VPC"
value = module.vpc.vpc_owner_id
}

output "private_subnets" {
description = "List of IDs of private subnets"
value = module.vpc.private_subnets
}

output "private_subnet_arns" {
description = "List of ARNs of private subnets"
value = module.vpc.private_subnet_arns
}

output "private_subnets_cidr_blocks" {
description = "List of cidr_blocks of private subnets"
value = module.vpc.private_subnets_cidr_blocks
}

output "private_subnets_ipv6_cidr_blocks" {
description = "List of IPv6 cidr_blocks of private subnets in an IPv6 enabled VPC"
value = module.vpc.private_subnets_ipv6_cidr_blocks
}

output "public_subnets" {
description = "List of IDs of public subnets"
value = module.vpc.public_subnets
}

output "public_subnet_arns" {
description = "List of ARNs of public subnets"
value = module.vpc.public_subnet_arns
}

output "public_subnets_cidr_blocks" {
description = "List of cidr_blocks of public subnets"
value = module.vpc.public_subnets_cidr_blocks
}

output "public_subnets_ipv6_cidr_blocks" {
description = "List of IPv6 cidr_blocks of public subnets in an IPv6 enabled VPC"
value = module.vpc.public_subnets_ipv6_cidr_blocks
}

# Static values (arguments)
output "azs" {
description = "A list of availability zones specified as argument to this module"
value = var.azs
}

output "cluster_name" {
description = "The name of the EKS cluster"
value = module.eks.cluster_name
}

output "cluster_endpoint" {
description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}

output "cluster_version" {
description = "The Kubernetes version for the cluster"
value = module.eks.cluster_version
}
4 changes: 4 additions & 0 deletions infra-examples/aws/eks/provider.tf
@@ -0,0 +1,4 @@
provider "aws" {
region = var.aws_region

}
94 changes: 94 additions & 0 deletions infra-examples/aws/eks/readme.md
@@ -0,0 +1,94 @@
# Terraform AWS VPC and EKS Cluster Deployment

This guide provides a step-by-step process to deploy a Virtual Private Cloud (VPC) and an Elastic Kubernetes Service (EKS) cluster in AWS using Terraform.

## Prerequisites

Ensure the following tools and configurations are set up before proceeding:

- **Terraform** installed (version 1.5.6+).
- **AWS CLI** installed and configured with the appropriate credentials and region (a quick verification check follows this list).
- **SSH Key Pair** created in AWS (you’ll need the key pair name for `key_name`).
- Proper IAM permissions to create VPC, EC2, and EKS resources.
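
To confirm the AWS CLI prerequisite, the following standard AWS CLI commands print the identity Terraform will act as and the configured region:

```bash
# Show which identity (account/role) the current credentials resolve to
aws sts get-caller-identity

# Show the configured default region and credential source
aws configure list
```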

## Steps for Deployment

### 1. Clone the Repository

```bash
git clone <repository-url>
cd <repository-directory>
```

### 2. Initialize Terraform

Run the following command to initialize Terraform and download the required providers and modules:

```bash
terraform init
```

### 3. Customize Variables

Override default values as needed, either by editing `variables.tf` or by setting them in `values.auto.tfvars.json`. Below is a table describing the available variables:

| Variable | Description | Type | Default |
|------------------------------------ |---------------------------------------------------------------------------------|---------------|-------------------|
| `aws_region` | The AWS Region in which to deploy the resources | `string` | |
| `private_subnets` | List of private subnets | `list(string)`| `[]` |
| `public_subnets` | List of public subnets | `list(string)`| `[]` |
| `cidr` | CIDR block for the VPC | `string` | `10.0.0.0/16` |
| `azs` | List of availability zones to use | `list(string)`| `[]` |
| `vpc_name` | The VPC name | `string` | |
| `enable_nat_gateway` | Enable NAT Gateway | `bool` | `true` |
| `single_nat_gateway` | Use a single NAT Gateway | `bool` | `false` |
| `one_nat_gateway_per_az` | Deploy one NAT gateway per availability zone | `bool` | `true` |
| `instance_types` | EC2 Instance types for the Kubernetes nodes | `list(string)`| |
| `cluster_version` | Kubernetes version for the EKS cluster | `string` | `1.29` |
| `cluster_name` | Name of the EKS cluster | `string` | |
| `desired_size` | Desired number of nodes in the EKS cluster | `number` | `2` |
| `disk_size` | Disk size for the nodes (in GB) | `number` | `40` |
| `key_name` | Name of the SSH Key Pair | `string` | |
| `max_size` | Maximum number of nodes in the EKS cluster | `number` | `3` |
| `min_size` | Minimum number of nodes in the EKS cluster | `number` | `1` |
| `extra_ssh_cidrs` | List of additional IP blocks allowed SSH access | `list(string)`| `[]` |
| `registry_credentials` | Image registry credentials for the nodes | `string` | |
| `node_groups_tags` | A map of tags to add to all node group resources | `map(string)` | `{}` |
| `enable_cluster_autoscaler` | Enable cluster autoscaler for the EKS cluster | `bool` | `false` |
| `ubuntu_version`                     | Ubuntu version for the nodes                                                      | `string`      | `jammy-22.04`     |
| `ami_id` | AMI ID for EKS nodes (optional) | `string` | `""` |
| `iam_role_use_name_prefix` | Use a name prefix for the IAM role associated with the cluster | `bool` | `true` |
| `iam_role_name` | IAM Role name for the cluster | `string` | `null` |
| `cluster_security_group_use_name_prefix`| Use a name prefix for the cluster security group | `bool` | `true` |
| `cluster_security_group_name` | Security group name for the cluster | `string` | `null` |
| `cluster_security_group_description`| Description of the cluster security group | `string` | `EKS cluster security group` |
| `node_group_subnets` | Subnets for node groups (typically private) | `list(string)`| `null` |
| `cluster_tags` | A map of tags to add to the cluster | `map(string)` | `{}` |
| `tags` | A map of tags to add to all resources | `map(string)` | `{}` |
| `node_group_name` | Name of the node group | `string` | `ubuntu_worker` |
| `capacity_type` | Type of capacity for EKS Node Group (options: `ON_DEMAND`, `SPOT`) | `string` | `ON_DEMAND` |
| `post_bootstrap_user_data` | Add post-bootstrap user data (optional) | `string` | `null` |
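
As a starting point, a minimal `values.auto.tfvars.json` might look like the following; every value here is illustrative and should be replaced with your own:

```json
{
  "aws_region": "us-east-1",
  "vpc_name": "example-vpc",
  "cidr": "10.0.0.0/16",
  "azs": ["us-east-1a", "us-east-1b"],
  "private_subnets": ["10.0.1.0/24", "10.0.2.0/24"],
  "public_subnets": ["10.0.101.0/24", "10.0.102.0/24"],
  "cluster_name": "example-eks",
  "instance_types": ["t3.large"],
  "key_name": "example-key-pair",
  "registry_credentials": "<docker-config-json>"
}
```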

### 4. Apply the Terraform Configuration

Run the following command to deploy the infrastructure:

```bash
terraform apply
```
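
If you prefer to review the changes before applying them, the standard plan/apply flow works as well:

```bash
# Write the plan to a file, inspect it, then apply exactly that plan
terraform plan -out=eks.tfplan
terraform apply eks.tfplan
```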

### 5. Access the EKS Cluster

Once the deployment is complete, configure `kubectl` to access the EKS cluster:

```bash
aws eks --region <aws-region> update-kubeconfig --name <cluster-name>
```
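
To verify that the worker nodes have joined the cluster (assuming `kubectl` is installed locally):

```bash
kubectl get nodes -o wide
```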

### 6. Clean Up

To destroy the infrastructure when you no longer need it, run:

```bash
terraform destroy
```
2 changes: 2 additions & 0 deletions infra-examples/aws/eks/templates/post_bootstrap_user_data.tpl
@@ -0,0 +1,2 @@
# Add Docker Registry credentials
echo '${registry_credentials}' > /var/lib/kubelet/config.json
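# Note: registry_credentials is expected to be a Docker config JSON consumed by
# the kubelet. A hypothetical example value (registry URL and auth are placeholders):
#   {"auths": {"registry.example.com": {"auth": "<base64 of user:password>"}}}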