Skip to content

Commit

Permalink
Terraform infra for EKS added
Browse files Browse the repository at this point in the history
  • Loading branch information
amirdamirov committed Feb 8, 2022
1 parent 30be8a0 commit 71dcb9e
Show file tree
Hide file tree
Showing 14 changed files with 378 additions and 2 deletions.
9 changes: 7 additions & 2 deletions README.MD
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,12 @@ I will demonstrate EKS and application deployment in this repo.
We have 2 folders:

- apps ( applications source code )
- terraform ( infra source code )
- terraform ( infra source code )

1. First lets configure terraform to use remote state in aws.
For implementing this we will need a S3 bucket with version enabling. Then we can check terraform/backend.tf file to configure it.
To implement this we will need an S3 bucket with versioning enabled. Check the terraform/backend.tf file to see how it should be configured.

2. Go to the "terraform" folder and run the following commands:
- terraform init
- terraform plan
- terraform apply
Empty file added k8s/app-deploy.yml
Empty file.
Empty file added k8s/app-service.yml
Empty file.
Empty file added k8s/mg-service.yml
Empty file.
Empty file added k8s/mg-ss.yml
Empty file.
137 changes: 137 additions & 0 deletions terraform/.terraform.lock.hcl

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 7 additions & 0 deletions terraform/backend.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Remote state backend: stores terraform.tfstate in a pre-created,
# versioned S3 bucket (see README step 1).
# NOTE: backend blocks cannot reference variables, so the bucket name,
# key and region must be hard-coded here.
terraform {
  backend "s3" {
    bucket = "stateterraformisac"
    key    = "terraform.tfstate"
    region = "eu-central-1"
  }
}
38 changes: 38 additions & 0 deletions terraform/eks-cluster.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# EKS control plane + self-managed worker groups, built with the community
# EKS module (pinned to 17.24.0 -- v17 uses the worker_groups interface;
# v18+ renamed most of these arguments, so do not bump casually).
module "eks" {
  source          = "terraform-aws-modules/eks/aws"
  version         = "17.24.0"
  cluster_name    = local.cluster_name # defined in vpc.tf
  cluster_version = "1.20"
  # Workers live in the private subnets only; internet egress goes via the
  # NAT gateway created by the vpc module.
  subnets         = module.vpc.private_subnets

  vpc_id = module.vpc.vpc_id

  # Defaults applied to every worker group below.
  workers_group_defaults = {
    root_volume_type = "gp2"
  }

  # Two ASG-backed worker groups with different instance sizes; each attaches
  # an extra SSH-management security group from security-groups.tf.
  worker_groups = [
    {
      name                          = "worker-group-1"
      instance_type                 = "t2.small"
      additional_userdata           = "echo foo bar"
      additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
      asg_desired_capacity          = 2
    },
    {
      name                          = "worker-group-2"
      instance_type                 = "t2.medium"
      additional_userdata           = "echo foo bar"
      additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
      asg_desired_capacity          = 1
    },
  ]
}

# Look up the cluster created above; provides the API endpoint and CA
# certificate consumed by the kubernetes provider (kubernetes.tf).
data "aws_eks_cluster" "cluster" {
  name = module.eks.cluster_id
}

# Short-lived authentication token for the cluster, used by the kubernetes
# provider (kubernetes.tf) instead of a static kubeconfig.
data "aws_eks_cluster_auth" "cluster" {
  name = module.eks.cluster_id
}
19 changes: 19 additions & 0 deletions terraform/kubernetes-dashboard-admin.rbac.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# ServiceAccount used to sign in to the Kubernetes dashboard.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
# Create ClusterRoleBinding
# Binds the admin-user ServiceAccount to the built-in cluster-admin
# ClusterRole, i.e. full, unrestricted cluster access.
# NOTE(review): cluster-admin is very broad -- fine for a demo, but scope
# this down before using in production.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kube-system
12 changes: 12 additions & 0 deletions terraform/kubernetes.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Kubernetes provider
# https://learn.hashicorp.com/terraform/kubernetes/provision-eks-cluster#optional-configure-terraform-kubernetes-provider
# To learn how to schedule deployments and services using the provider, go here: https://learn.hashicorp.com/terraform/kubernetes/deploy-nginx-kubernetes

# The Kubernetes provider is included in this file so the EKS module can complete successfully. Otherwise, it throws an error when creating `kubernetes_config_map.aws_auth`.
# You should **not** schedule deployments and services in this workspace. This keeps workspaces modular (one for provision EKS, another for scheduling Kubernetes resources) as per best practices.

# Kubernetes provider wired to the EKS cluster created in eks-cluster.tf.
# Credentials come from the aws_eks_cluster / aws_eks_cluster_auth data
# sources, so no static kubeconfig file is required.
provider "kubernetes" {
  host  = data.aws_eks_cluster.cluster.endpoint
  token = data.aws_eks_cluster_auth.cluster.token
  # Index syntax replaces the legacy ".0" attribute access, which has been
  # deprecated since Terraform 0.12 (this config requires >= 0.14).
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
}
34 changes: 34 additions & 0 deletions terraform/outputs.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
output "cluster_id" {
  description = "EKS cluster ID."
  value       = module.eks.cluster_id
}

output "cluster_endpoint" {
  description = "Endpoint for EKS control plane."
  value       = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
  description = "Security group ids attached to the cluster control plane."
  value       = module.eks.cluster_security_group_id
}

output "kubectl_config" {
  description = "kubectl config as generated by the module."
  value       = module.eks.kubeconfig
  # The kubeconfig embeds cluster auth material and the EKS module marks it
  # sensitive; Terraform >= 0.14 requires the output to be flagged sensitive
  # too (and this keeps it out of plain CLI output). Retrieve it with:
  #   terraform output -raw kubectl_config
  sensitive = true
}

output "config_map_aws_auth" {
  description = "A kubernetes configuration to authenticate to this EKS cluster."
  value       = module.eks.config_map_aws_auth
}

output "region" {
  description = "AWS region"
  value       = var.region
}

output "cluster_name" {
  description = "Kubernetes Cluster Name"
  value       = local.cluster_name
}
47 changes: 47 additions & 0 deletions terraform/security-groups.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@

# SSH management access for worker-group-1: allows port 22 only from the
# 10.0.0.0/8 private range (which contains this VPC's 10.0.0.0/16 CIDR).
# name_prefix (rather than name) avoids collisions across recreations.
resource "aws_security_group" "worker_group_mgmt_one" {
  name_prefix = "worker_group_mgmt_one"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"

    cidr_blocks = [
      "10.0.0.0/8",
    ]
  }
}

# SSH management access for worker-group-2: allows port 22 only from the
# 192.168.0.0/16 private range.
# NOTE(review): this range does not overlap the VPC CIDR (10.0.0.0/16) --
# presumably intended for an external management network; confirm.
resource "aws_security_group" "worker_group_mgmt_two" {
  name_prefix = "worker_group_mgmt_two"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"

    cidr_blocks = [
      "192.168.0.0/16",
    ]
  }
}

# SSH management access from all RFC 1918 private ranges. Declared but not
# attached to any worker group in this commit (only the two groups above
# are referenced from eks-cluster.tf).
resource "aws_security_group" "all_worker_mgmt" {
  name_prefix = "all_worker_management"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"

    cidr_blocks = [
      "10.0.0.0/8",
      "172.16.0.0/12",
      "192.168.0.0/16",
    ]
  }
}
30 changes: 30 additions & 0 deletions terraform/versions.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# Provider version constraints for this root module. aws and kubernetes use
# floors (>=) so patch/minor updates are picked up; random/local/null are
# pinned exactly.
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.20.0"
    }

    random = {
      source  = "hashicorp/random"
      version = "3.1.0"
    }

    local = {
      source  = "hashicorp/local"
      version = "2.1.0"
    }

    null = {
      source  = "hashicorp/null"
      version = "3.1.0"
    }

    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.0.1"
    }
  }

  # Minimum Terraform core version (index splat syntax, sensitive outputs).
  required_version = ">= 0.14"
}
47 changes: 47 additions & 0 deletions terraform/vpc.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# Region is overridable at plan/apply time; also exported in outputs.tf.
variable "region" {
  default     = "eu-central-1"
  description = "AWS region"
}

provider "aws" {
  region = var.region
}

# All AZs available in the selected region; consumed by the vpc module below.
data "aws_availability_zones" "available" {}

locals {
  # Cluster name is randomized so repeated demo runs don't collide;
  # referenced from eks-cluster.tf and outputs.tf.
  cluster_name = "education-eks-${random_string.suffix.result}"
}

# 8-character alphanumeric suffix (special = false excludes punctuation,
# keeping the name valid for EKS).
resource "random_string" "suffix" {
  length  = 8
  special = false
}

# VPC for the EKS cluster: 3 private subnets (workers) + 3 public subnets,
# one shared NAT gateway for private-subnet egress. Subnet tags follow the
# kubernetes.io conventions so EKS can place internet-facing ELBs in the
# public subnets and internal ELBs in the private ones.
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "3.2.0"

  name                 = "education-vpc"
  cidr                 = "10.0.0.0/16"
  azs                  = data.aws_availability_zones.available.names
  private_subnets      = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
  enable_nat_gateway   = true
  # Single NAT gateway keeps cost down at the price of AZ redundancy.
  single_nat_gateway   = true
  enable_dns_hostnames = true

  tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
  }

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                      = "1"
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"             = "1"
  }
}

0 comments on commit 71dcb9e

Please sign in to comment.