Commit

Private cluster (#84)
thpang authored Jul 29, 2021
1 parent 0a1a87e commit 332ceb1
Showing 16 changed files with 311 additions and 187 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -38,3 +38,6 @@ terraform.tfvars

# Mac files
.DS_Store

# Configuration files
*.conf
3 changes: 2 additions & 1 deletion docs/CONFIG-VARS.md
@@ -274,11 +274,12 @@ When `storage_type=ha`, [AWS Elastic File System](https://aws.amazon.com/efs/) s
| postgres_instance_type | The VM type for the PostgreSQL Server | string | "db.m5.xlarge" | |
| postgres_storage_size | Max storage allowed for the PostgreSQL server in MB | number | 50 | |
| postgres_backup_retention_days | Backup retention days for the PostgreSQL server | number | 7 | Supported values are between 7 and 35 days. |
| postgres_storage_encrypted | Encrypt PostgrSQL data at rest | bool | false| |
| postgres_storage_encrypted | Encrypt PostgreSQL data at rest | bool | false| |
| postgres_administrator_login | The Administrator Login for the PostgreSQL Server | string | "pgadmin" | Changing this forces a new resource to be created |
| postgres_administrator_password | The Password associated with the postgres_administrator_login for the PostgreSQL Server | string | | |
| postgres_db_name | Name of database to create | string | "SharedServices" | |
| postgres_multi_az | Specifies if PostgreSQL instance is multi-AZ | bool | false | |
| postgres_deletion_protection | Protect from accidental resource deletion | bool | false | |
| postgres_ssl_enforcement_enabled | Enforce SSL on connections to PostgreSQL server instance | bool | true | |
| postgres_parameters | additional parameters for PostgreSQL server | list of maps | [] | |
| postgres_options | additional options for PostgreSQL server | list of maps | [] | |
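Both `postgres_parameters` and `postgres_options` take a list of maps. A minimal sketch of how a tfvars file might populate them, assuming illustrative parameter names that are not part of this change:

```hcl
# Hypothetical values; parameter names must be valid for the RDS PostgreSQL engine in use.
postgres_parameters = [
  { "apply_method" = "immediate", "name" = "log_connections", "value" = "1" }
]

# postgres_options uses the same list-of-maps shape; left empty here.
postgres_options = []
```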
1 change: 1 addition & 0 deletions examples/sample-input-byo.tfvars
@@ -130,4 +130,5 @@ create_jump_vm = true

# Cloud Postgres values config
create_postgres = true # set this to "false" when using internal Crunchy Postgres and AWS Postgres is NOT needed
postgres_ssl_enforcement_enabled = false
postgres_administrator_password = "mySup3rS3cretPassw0rd"
1 change: 1 addition & 0 deletions examples/sample-input-custom-data.tfvars
@@ -120,4 +120,5 @@ create_jump_vm = true

# Cloud Postgres values config
create_postgres = true # set this to "false" when using internal Crunchy Postgres and AWS Postgres is NOT needed
postgres_ssl_enforcement_enabled = false
postgres_administrator_password = "mySup3rS3cretPassw0rd"
1 change: 1 addition & 0 deletions examples/sample-input-ha.tfvars
@@ -123,4 +123,5 @@ create_jump_vm = true

# Cloud Postgres values config
create_postgres = true # set this to "false" when using internal Crunchy Postgres and AWS Postgres is NOT needed
postgres_ssl_enforcement_enabled = false
postgres_administrator_password = "mySup3rS3cretPassw0rd"
4 changes: 2 additions & 2 deletions examples/sample-input-minimal.tfvars
@@ -21,7 +21,7 @@ tags = { } # e.g., { "key1" = "value1", "key2

## Cluster config
kubernetes_version = "1.19"
default_nodepool_node_count = 2
default_nodepool_node_count = 1
default_nodepool_vm_type = "m5.large"
default_nodepool_custom_data = ""

@@ -78,7 +78,7 @@ create_nfs_public_ip = false
nfs_vm_admin = "nfsuser"
nfs_vm_type = "m5.xlarge"


# Cloud Postgres values config
create_postgres = false # set this to "false" when using internal Crunchy Postgres and AWS Postgres is NOT needed
postgres_ssl_enforcement_enabled = false
postgres_administrator_password = "mySup3rS3cretPassw0rd"
1 change: 1 addition & 0 deletions examples/sample-input.tfvars
@@ -120,4 +120,5 @@ create_jump_vm = true

# Cloud Postgres values config
create_postgres = true # set this to "false" when using internal Crunchy Postgres and AWS Postgres is NOT needed
postgres_ssl_enforcement_enabled = false
postgres_administrator_password = "mySup3rS3cretPassw0rd"
8 changes: 4 additions & 4 deletions files/custom-data/additional_userdata.sh
@@ -65,10 +65,10 @@ if [ -d "/nvme/disk" ]; then
else
rm -rf /nvme/disk
fi
ln -s /pv-disks/$UUID /nvme/disk
echo "/nvme/disk has been symlinked to /pv-disks/$UUID"
fi
ln -s /pv-disks/$UUID /nvme/disk
echo "/nvme/disk has been symlinked to /pv-disks/$UUID"

mkdir -p /nvme/disk/{cache,saswork}
chmod 777 -R /nvme
chown -R nobody:nobody /nvme
chmod 777 -R /nvme/disk/
chown -R nobody:nobody /nvme/disk/
6 changes: 5 additions & 1 deletion files/policies/devops-iac-eks-policy.json
@@ -15,9 +15,11 @@
"autoscaling:Describe*",
"autoscaling:DetachInstances",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"autoscaling:SuspendProcesses",
"cloudformation:DescribeStacks",
"cloudformation:ListStacks",
"cloudformation:ListStackResources",
"ec2:AllocateAddress",
"ec2:AssignPrivateIpAddresses",
@@ -82,11 +84,12 @@
"ec2:DescribeInstances",
"ec2:TerminateInstances",
"ec2:DescribeAccountAttributes",
"ec2:*VpcEndpoint*",
"elasticfilesystem:*",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeTags",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticfilesystem:*",
"eks:*",
"ecr:CreateRepository",
"ecr:DeleteRepository",
@@ -140,6 +143,7 @@
"iam:TagPolicy",
"iam:TagInstanceProfile",
"iam:TagOpenIDConnectProvider",
"iam:UntagPolicy",
"iam:UpdateAssumeRolePolicy",
"iam:UpdateAccessKey",
"resource-groups:*",
78 changes: 78 additions & 0 deletions locals.tf
@@ -0,0 +1,78 @@
locals {

# General
security_group_id = var.security_group_id == null ? aws_security_group.sg[0].id : data.aws_security_group.sg[0].id
cluster_name = "${var.prefix}-eks"

# Infrastructure Mode
is_standard = var.infra_mode == "standard" ? true : false
is_private = var.infra_mode == "private" ? true : false

# CIDRs
default_public_access_cidrs = local.is_private ? [] : (var.default_public_access_cidrs == null ? [] : var.default_public_access_cidrs)
vm_public_access_cidrs = local.is_private ? [] : (var.vm_public_access_cidrs == null ? local.default_public_access_cidrs : var.vm_public_access_cidrs)
cluster_endpoint_public_access_cidrs = local.is_private ? [] : (var.cluster_endpoint_public_access_cidrs == null ? local.default_public_access_cidrs : var.cluster_endpoint_public_access_cidrs)
cluster_endpoint_private_access_cidrs = var.cluster_endpoint_private_access_cidrs == null ? [var.vpc_cidr] : var.cluster_endpoint_private_access_cidrs
postgres_public_access_cidrs = local.is_private ? [] : (var.postgres_public_access_cidrs == null ? local.default_public_access_cidrs : var.postgres_public_access_cidrs)

# IPs
create_jump_public_ip = var.create_jump_public_ip == null ? local.is_standard : var.create_jump_public_ip
create_nfs_public_ip = var.create_nfs_public_ip == null ? local.is_standard : var.create_nfs_public_ip

# Subnets
jump_vm_subnet = local.create_jump_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
nfs_vm_subnet = local.create_nfs_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
nfs_vm_subnet_az = local.create_nfs_public_ip ? module.vpc.public_subnet_azs[0] : module.vpc.private_subnet_azs[0]

# Kubernetes
kubeconfig_filename = "${local.cluster_name}-kubeconfig.conf"
kubeconfig_path = var.iac_tooling == "docker" ? "/workspace/${local.kubeconfig_filename}" : local.kubeconfig_filename
kubeconfig_ca_cert = data.aws_eks_cluster.cluster.certificate_authority.0.data

# Mapping node_pools to worker_groups
default_node_pool = [
{
name = "default"
instance_type = var.default_nodepool_vm_type
root_volume_size = var.default_nodepool_os_disk_size
root_volume_type = var.default_nodepool_os_disk_type
root_iops = var.default_nodepool_os_disk_iops
asg_desired_capacity = var.default_nodepool_node_count
asg_min_size = var.default_nodepool_min_nodes
asg_max_size = var.default_nodepool_max_nodes
kubelet_extra_args = "--node-labels=${replace(replace(jsonencode(var.default_nodepool_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", var.default_nodepool_taints)}"
additional_userdata = (var.default_nodepool_custom_data != "" ? file(var.default_nodepool_custom_data) : "")
metadata_http_endpoint = var.default_nodepool_metadata_http_endpoint
metadata_http_tokens = var.default_nodepool_metadata_http_tokens
metadata_http_put_response_hop_limit = var.default_nodepool_metadata_http_put_response_hop_limit

}
]

user_node_pool = [
for np_key, np_value in var.node_pools :
{
name = np_key
instance_type = np_value.vm_type
root_volume_size = np_value.os_disk_size
root_volume_type = np_value.os_disk_type
root_iops = np_value.os_disk_iops
asg_desired_capacity = var.autoscaling_enabled ? np_value.min_nodes == 0 ? 1 : np_value.min_nodes : np_value.min_nodes # TODO - Remove when moving to managed nodes
asg_min_size = np_value.min_nodes
asg_max_size = np_value.max_nodes
kubelet_extra_args = "--node-labels=${replace(replace(jsonencode(np_value.node_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", np_value.node_taints)}"
additional_userdata = (np_value.custom_data != "" ? file(np_value.custom_data) : "")
metadata_http_endpoint = np_value.metadata_http_endpoint
metadata_http_tokens = np_value.metadata_http_tokens
metadata_http_put_response_hop_limit = np_value.metadata_http_put_response_hop_limit
}
]

# Merging the default_node_pool into the work_groups node pools
worker_groups = concat(local.default_node_pool, local.user_node_pool)

# Postgres options/parameters
postgres_options = var.create_postgres ? var.postgres_options : null
postgres_parameters = var.create_postgres ? var.postgres_ssl_enforcement_enabled ? concat(var.postgres_parameters, [{ "apply_method": "immediate", "name": "rds.force_ssl", "value": "1" }]) : concat(var.postgres_parameters, [{ "apply_method": "immediate", "name": "rds.force_ssl", "value": "0" }]) : null

}
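The `kubelet_extra_args` expression above flattens a label map into the comma-separated form kubelet expects: `jsonencode` serializes the map, the first `replace` strips the quotes and braces with a regex, and the second turns each `:` into `=`. A sketch of the transformation, assuming a hypothetical label map that is not taken from this commit:

```hcl
# Hypothetical input:
#   node_labels = { "workload.sas.com/class" = "compute" }
#
# jsonencode(node_labels)             => {"workload.sas.com/class":"compute"}
# replace(..., "/[\"\\{\\}]/", "")    => workload.sas.com/class:compute
# replace(..., ":", "=")              => workload.sas.com/class=compute
#
# Resulting kubelet flag:
#   --node-labels=workload.sas.com/class=compute
```

The `postgres_parameters` local derives its value in a similar way: when `create_postgres` is true, it appends `rds.force_ssl` set to `"1"` or `"0"` depending on `postgres_ssl_enforcement_enabled`.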