feat(launch-template): Updated launch configs to launch templates for GPE-1387 #24

Merged · 4 commits · Sep 23, 2024
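
This PR migrates the EKS nodepool, EKS default, and squid modules from the deprecated aws_launch_configuration resource to aws_launch_template, and points each aws_autoscaling_group at the new template. The general shape of the mapping is sketched below with placeholder values (the AMI ID, instance profile, and security group are illustrative, not values from this repo): flat arguments become nested blocks, user_data_base64 becomes base64-encoded user_data, and root_block_device becomes a block_device_mappings entry.

resource "aws_launch_template" "example" {
  name_prefix   = "example-"
  image_id      = "ami-0abc1234example"   # placeholder AMI
  instance_type = "t3.medium"
  key_name      = "example-keypair"       # placeholder key pair

  # iam_instance_profile changes from a string argument to a block
  iam_instance_profile {
    name = "example-instance-profile"
  }

  # security_groups and associate_public_ip_address move into network_interfaces
  network_interfaces {
    associate_public_ip_address = false
    security_groups             = ["sg-0abc1234example"]
  }

  # launch templates take base64-encoded user_data; launch configurations
  # had a separate user_data_base64 argument for that
  user_data = base64encode("#!/bin/bash\necho bootstrap")

  # root_block_device becomes a block_device_mappings entry keyed by device name
  block_device_mappings {
    device_name = "/dev/xvda"
    ebs {
      volume_size = 30
    }
  }
}

resource "aws_autoscaling_group" "example" {
  min_size           = 1
  max_size           = 3
  availability_zones = ["us-east-1a"]

  # replaces: launch_configuration = aws_launch_configuration.example.id
  launch_template {
    id      = aws_launch_template.example.id
    version = "$Latest"
  }
}
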
60 changes: 46 additions & 14 deletions tf_files/aws/modules/eks-nodepool/cloud.tf
@@ -264,35 +264,67 @@ resource "aws_security_group_rule" "nodes_interpool_communications" {
# with AutoScaling policies (not implemented here).


resource "aws_launch_configuration" "eks_launch_configuration" {
associate_public_ip_address = false
iam_instance_profile = aws_iam_instance_profile.eks_node_instance_profile.name
image_id = data.aws_ami.eks_worker.id
instance_type = var.nodepool_instance_type
name_prefix = "eks-${var.vpc_name}-nodepool-${var.nodepool}"
security_groups = [aws_security_group.eks_nodes_sg.id, aws_security_group.ssh.id]
user_data_base64 = sensitive(base64encode(templatefile("${path.module}/../../../../flavors/eks/${var.bootstrap_script}",{eks_ca = var.eks_cluster_ca, eks_endpoint = var.eks_cluster_endpoint, eks_region = data.aws_region.current.name, vpc_name = var.vpc_name, ssh_keys = templatefile("${path.module}/../../../../files/authorized_keys/ops_team",{}), nodepool = var.nodepool, lifecycle_type = "ONDEMAND", kernel = var.kernel, activation_id = var.activation_id, customer_id = var.customer_id})))
key_name = var.ec2_keyname

root_block_device {
volume_size = var.nodepool_worker_drive_size
resource "aws_launch_template" "eks_launch_template" {
name_prefix = "eks-${var.vpc_name}-nodepool-${var.nodepool}"
instance_type = var.nodepool_instance_type
image_id = data.aws_ami.eks_worker.id
key_name = var.ec2_keyname

iam_instance_profile {
name = aws_iam_instance_profile.eks_node_instance_profile.name
}

network_interfaces {
associate_public_ip_address = false
security_groups = [aws_security_group.eks_nodes_sg.id, aws_security_group.ssh.id]
}

user_data = sensitive(base64encode(templatefile("${path.module}/../../../../flavors/eks/${var.bootstrap_script}", {
eks_ca = var.eks_cluster_ca,
eks_endpoint = var.eks_cluster_endpoint,
eks_region = data.aws_region.current.name,
vpc_name = var.vpc_name,
ssh_keys = templatefile("${path.module}/../../../../files/authorized_keys/ops_team", {}),
nodepool = var.nodepool,
lifecycle_type = "ONDEMAND",
kernel = var.kernel,
activation_id = var.activation_id,
customer_id = var.customer_id
})))

block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = var.nodepool_worker_drive_size
}
}

tag_specifications {
resource_type = "instance"
tags = {
Name = "eks-${var.vpc_name}-${var.nodepool}"
}
}

lifecycle {
create_before_destroy = true
#ignore_changes = [user_data]
}
}


resource "aws_autoscaling_group" "eks_autoscaling_group" {
desired_capacity = var.nodepool_asg_desired_capacity
protect_from_scale_in = var.scale_in_protection
launch_configuration = aws_launch_configuration.eks_launch_configuration.id
max_size = var.nodepool_asg_max_size
min_size = var.nodepool_asg_min_size
name = "eks-${var.nodepool}worker-node-${var.vpc_name}"
vpc_zone_identifier = flatten([var.eks_private_subnets])

launch_template {
id = aws_launch_template.eks_launch_template.id
version = "$Latest"
}

tag {
key = "Environment"
value = var.vpc_name
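
One behavioral note on the launch_template blocks used throughout this PR: version = "$Latest" resolves at instance launch time, so any newly created template version is picked up by the next scale-out without a Terraform apply. If that is not wanted, a pinned alternative (an assumption, not part of this PR) would track only the version Terraform last applied:

  launch_template {
    id      = aws_launch_template.eks_launch_template.id
    version = aws_launch_template.eks_launch_template.latest_version
  }
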
60 changes: 44 additions & 16 deletions tf_files/aws/modules/eks/asg.tf
@@ -14,37 +14,66 @@
# See template.tf for more information about the bootstrap script


resource "aws_launch_configuration" "eks_launch_configuration" {
count = var.use_asg ? 1 : 0
associate_public_ip_address = false
iam_instance_profile = aws_iam_instance_profile.eks_node_instance_profile.name
image_id = local.ami
instance_type = var.instance_type
name_prefix = "eks-${var.vpc_name}"
security_groups = [aws_security_group.eks_nodes_sg.id, aws_security_group.ssh.id]
user_data_base64 = sensitive(base64encode(templatefile("${path.module}/../../../../flavors/eks/${var.bootstrap_script}", {eks_ca = aws_eks_cluster.eks_cluster.certificate_authority.0.data, eks_endpoint = aws_eks_cluster.eks_cluster.endpoint, eks_region = data.aws_region.current.name, vpc_name = var.vpc_name, ssh_keys = templatefile("${path.module}/../../../../files/authorized_keys/ops_team", {}), nodepool = "default", lifecycle_type = "ONDEMAND", activation_id = var.activation_id, customer_id = var.customer_id})))
key_name = var.ec2_keyname

root_block_device {
volume_size = var.worker_drive_size
resource "aws_launch_template" "eks_launch_template" {
name_prefix = "eks-${var.vpc_name}"
image_id = local.ami
instance_type = var.instance_type
key_name = var.ec2_keyname

iam_instance_profile {
name = aws_iam_instance_profile.eks_node_instance_profile.name
}

network_interfaces {
associate_public_ip_address = false
security_groups = [aws_security_group.eks_nodes_sg.id, aws_security_group.ssh.id]
}

user_data = sensitive(base64encode(templatefile("${path.module}/../../../../flavors/eks/${var.bootstrap_script}", {
eks_ca = aws_eks_cluster.eks_cluster.certificate_authority[0].data
eks_endpoint = aws_eks_cluster.eks_cluster.endpoint
eks_region = data.aws_region.current.name
vpc_name = var.vpc_name
ssh_keys = templatefile("${path.module}/../../../../files/authorized_keys/ops_team", {})
nodepool = "default"
lifecycle_type = "ONDEMAND"
activation_id = var.activation_id
customer_id = var.customer_id
})))

block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = var.worker_drive_size
}
}

tag_specifications {
resource_type = "instance"
tags = {
Name = "eks-${var.vpc_name}-node"
}
}

lifecycle {
create_before_destroy = true
#ignore_changes = [user_data]
}
}

resource "aws_autoscaling_group" "eks_autoscaling_group" {
count = var.use_asg ? 1 : 0
service_linked_role_arn = aws_iam_service_linked_role.autoscaling.arn
desired_capacity = 2
launch_configuration = aws_launch_configuration.eks_launch_configuration[0].id
max_size = 10
min_size = 2
name = "eks-worker-node-${var.vpc_name}"
vpc_zone_identifier = flatten([aws_subnet.eks_private.*.id])

launch_template {
id = aws_launch_template.eks_launch_template.id
version = "$Latest"
}

tag {
key = "Environment"
value = var.vpc_name
@@ -81,7 +110,6 @@ resource "aws_autoscaling_group" "eks_autoscaling_group" {
propagate_at_launch = true
}

# Avoid unnecessary changes for existing commons running on EKS
lifecycle {
ignore_changes = [desired_capacity, max_size, min_size]
}
47 changes: 33 additions & 14 deletions tf_files/aws/modules/squid_auto/cloud.tf
@@ -68,16 +68,22 @@ resource "aws_route_table_association" "squid_auto0" {


# Auto scaling group for squid auto
resource "aws_launch_template" "squid_auto" {
name_prefix = "${var.env_squid_name}-lt"
instance_type = var.squid_instance_type
image_id = data.aws_ami.public_squid_ami.id
key_name = var.ssh_key_name

iam_instance_profile {
name = aws_iam_instance_profile.squid-auto_role_profile.name
}

network_interfaces {
associate_public_ip_address = true
security_groups = [aws_security_group.squidauto_in.id, aws_security_group.squidauto_out.id]
}

resource "aws_launch_configuration" "squid_auto" {
name_prefix = "${var.env_squid_name}_autoscaling_launch_config"
image_id = data.aws_ami.public_squid_ami.id
instance_type = var.squid_instance_type
security_groups = [aws_security_group.squidauto_in.id, aws_security_group.squidauto_out.id]
key_name = var.ssh_key_name
iam_instance_profile = aws_iam_instance_profile.squid-auto_role_profile.id
associate_public_ip_address = true
user_data = <<EOF
user_data = sensitive(base64encode(<<EOF
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="BOUNDARY"

@@ -176,16 +182,25 @@ CLOUD_AUTOMATION="$USER_HOME/cloud-automation"
) > /var/log/bootstrapping_script_part2.log
--BOUNDARY--
EOF
))

block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = var.squid_instance_drive_size
}
}

root_block_device {
volume_size = var.squid_instance_drive_size
tag_specifications {
resource_type = "instance"
tags = {
Name = var.env_squid_name
}
}

lifecycle {
create_before_destroy = true
}

depends_on = [aws_iam_instance_profile.squid-auto_role_profile]
}

resource "null_resource" "service_depends_on" {
Expand Down Expand Up @@ -223,9 +238,13 @@ resource "aws_autoscaling_group" "squid_auto" {
max_size = var.cluster_max_size
min_size = var.cluster_min_size
vpc_zone_identifier = aws_subnet.squid_pub0.*.id
launch_configuration = aws_launch_configuration.squid_auto.name
depends_on = [null_resource.service_depends_on, aws_route_table_association.squid_auto0]

launch_template {
id = aws_launch_template.squid_auto.id
version = "$Latest"
}

tag {
key = "Name"
value = "${var.env_squid_name}-grp-member"
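
In the squid modules the heredoc user_data is now wrapped in sensitive(base64encode(...)), since launch templates expect base64-encoded user data rather than the raw strings launch configurations accepted. A minimal sketch of two equivalent ways to supply it (boot.sh is a hypothetical path, not a file in this repo):

  user_data = base64encode(file("${path.module}/boot.sh"))
  # or, equivalently:
  user_data = filebase64("${path.module}/boot.sh")
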
45 changes: 30 additions & 15 deletions tf_files/aws/modules/squid_nlb_central_csoc/cloud.tf
@@ -206,19 +206,25 @@ resource "aws_vpc_endpoint_service" "squid_nlb" {
}

# Auto scaling group for squid nlb
resource "aws_launch_configuration" "squid_nlb" {
name_prefix = "${var.env_nlb_name}_autoscaling_launch_config"
image_id = data.aws_ami.public_squid_ami.id
instance_type = "t3.xlarge"
security_groups = [aws_security_group.squidnlb_in.id, aws_security_group.squidnlb_out.id]
key_name = var.ssh_key_name
iam_instance_profile = aws_iam_instance_profile.squid-nlb_role_profile.id
associate_public_ip_address = true
depends_on = [aws_iam_instance_profile.squid-nlb_role_profile]
user_data = <<EOF
resource "aws_launch_template" "squid_nlb" {
name_prefix = "${var.env_nlb_name}-lt"
instance_type = "t3.xlarge"
image_id = data.aws_ami.public_squid_ami.id
key_name = var.ssh_key_name

iam_instance_profile {
name = aws_iam_instance_profile.squid-nlb_role_profile.name
}

network_interfaces {
associate_public_ip_address = true
security_groups = [aws_security_group.squidnlb_in.id, aws_security_group.squidnlb_out.id]
}

user_data = sensitive(base64encode(<<EOF
#!/bin/bash
cd /home/ubuntu
sudo git clone https://github.com/uc-cdis/cloud-automation.git
sudo git clone https://github.com/uc-cdis/cloud-automation.git
sudo chown -R ubuntu. /home/ubuntu/cloud-automation
cd /home/ubuntu/cloud-automation
git pull
@@ -241,14 +247,19 @@ sudo apt-get autoclean
cd /home/ubuntu
sudo bash "${var.bootstrap_path}${var.bootstrap_script}" 2>&1 |sudo tee --append /var/log/bootstrapping_script.log
EOF

root_block_device {
volume_size = 30
))
block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = 30
}
}

lifecycle {
create_before_destroy = true
}

depends_on = [aws_iam_instance_profile.squid-nlb_role_profile]
}

resource "aws_autoscaling_group" "squid_nlb" {
@@ -261,7 +272,11 @@ resource "aws_autoscaling_group" "squid_nlb" {
min_size = 1
target_group_arns = [aws_lb_target_group.squid_nlb-http.arn, aws_lb_target_group.squid_nlb-sftp.arn]
vpc_zone_identifier = [aws_subnet.squid_pub0.id, aws_subnet.squid_pub1.id, aws_subnet.squid_pub2.id, aws_subnet.squid_pub3.id, aws_subnet.squid_pub4.id, aws_subnet.squid_pub5.id]
launch_configuration = aws_launch_configuration.squid_nlb.name

launch_template {
id = aws_launch_template.squid_nlb.id
version = "$Latest"
}

tag {
key = "Name"
43 changes: 32 additions & 11 deletions tf_files/aws/modules/squidnlb/cloud.tf
@@ -148,16 +148,22 @@ resource "aws_lb_listener" "squid_nlb-sftp" {
}

# Auto scaling group for squid nlb
resource "aws_launch_configuration" "squid_nlb" {
name_prefix = "${var.env_nlb_name}_autoscaling_launch_config"
image_id = data.aws_ami.public_squid_ami.id
instance_type = "t2.medium"
security_groups = [aws_security_group.squidnlb_in.id, aws_security_group.squidnlb_out.id]
key_name = var.ssh_key_name
iam_instance_profile = aws_iam_instance_profile.squid-nlb_role_profile.id
associate_public_ip_address = true
depends_on = [aws_iam_instance_profile.squid-nlb_role_profile]
user_data = <<EOF
resource "aws_launch_template" "squid_nlb" {
name_prefix = "${var.env_nlb_name}-lt"
instance_type = "t2.medium"
image_id = data.aws_ami.public_squid_ami.id
key_name = var.ssh_key_name

iam_instance_profile {
name = aws_iam_instance_profile.squid-nlb_role_profile.name
}

network_interfaces {
associate_public_ip_address = true
security_groups = [aws_security_group.squidnlb_in.id, aws_security_group.squidnlb_out.id]
}

user_data = sensitive(base64encode(<<EOF
#!/bin/bash
cd /home/ubuntu
sudo git clone https://github.com/uc-cdis/cloud-automation.git
@@ -181,12 +187,23 @@ sudo apt-get autoclean
cd /home/ubuntu
sudo bash "${var.bootstrap_path}${var.bootstrap_script}" 2>&1 |sudo tee --append /var/log/bootstrapping_script.log
EOF
))

block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = 30
}
}

lifecycle {
create_before_destroy = true
}

depends_on = [aws_iam_instance_profile.squid-nlb_role_profile]
}


resource "aws_autoscaling_group" "squid_nlb" {
name = "${var.env_nlb_name}_autoscaling_grp"
# If you define a list of subnet IDs split across the desired availability zones, set them using vpc_zone_identifier
@@ -197,7 +214,11 @@ resource "aws_autoscaling_group" "squid_nlb" {
min_size = 1
target_group_arns = [aws_lb_target_group.squid_nlb-http.arn, aws_lb_target_group.squid_nlb-sftp.arn]
vpc_zone_identifier = [aws_subnet.squid_pub0.id, aws_subnet.squid_pub1.id, aws_subnet.squid_pub2.id]
launch_configuration = aws_launch_configuration.squid_nlb.name

launch_template {
id = aws_launch_template.squid_nlb.id
version = "$Latest"
}

tag {
key = "Name"