diff --git a/tf_files/aws/modules/eks-nodepool/cloud.tf b/tf_files/aws/modules/eks-nodepool/cloud.tf
index 6e67f532..960381cd 100644
--- a/tf_files/aws/modules/eks-nodepool/cloud.tf
+++ b/tf_files/aws/modules/eks-nodepool/cloud.tf
@@ -264,35 +264,67 @@ resource "aws_security_group_rule" "nodes_interpool_communications" {
 # with AutoScaling policies (not implemented here).
 
-resource "aws_launch_configuration" "eks_launch_configuration" {
-  associate_public_ip_address = false
-  iam_instance_profile        = aws_iam_instance_profile.eks_node_instance_profile.name
-  image_id                    = data.aws_ami.eks_worker.id
-  instance_type               = var.nodepool_instance_type
-  name_prefix                 = "eks-${var.vpc_name}-nodepool-${var.nodepool}"
-  security_groups             = [aws_security_group.eks_nodes_sg.id, aws_security_group.ssh.id]
-  user_data_base64            = sensitive(base64encode(templatefile("${path.module}/../../../../flavors/eks/${var.bootstrap_script}",{eks_ca = var.eks_cluster_ca, eks_endpoint = var.eks_cluster_endpoint, eks_region = data.aws_region.current.name, vpc_name = var.vpc_name, ssh_keys = templatefile("${path.module}/../../../../files/authorized_keys/ops_team",{}), nodepool = var.nodepool, lifecycle_type = "ONDEMAND", kernel = var.kernel, activation_id = var.activation_id, customer_id = var.customer_id})))
-  key_name                    = var.ec2_keyname
-
-  root_block_device {
-    volume_size = var.nodepool_worker_drive_size
+resource "aws_launch_template" "eks_launch_template" {
+  name_prefix   = "eks-${var.vpc_name}-nodepool-${var.nodepool}"
+  instance_type = var.nodepool_instance_type
+  image_id      = data.aws_ami.eks_worker.id
+  key_name      = var.ec2_keyname
+
+  iam_instance_profile {
+    name = aws_iam_instance_profile.eks_node_instance_profile.name
+  }
+
+  network_interfaces {
+    associate_public_ip_address = false
+    security_groups             = [aws_security_group.eks_nodes_sg.id, aws_security_group.ssh.id]
+  }
+
+  user_data = sensitive(base64encode(templatefile("${path.module}/../../../../flavors/eks/${var.bootstrap_script}", {
+    eks_ca         = var.eks_cluster_ca,
+    eks_endpoint   = var.eks_cluster_endpoint,
+    eks_region     = data.aws_region.current.name,
+    vpc_name       = var.vpc_name,
+    ssh_keys       = templatefile("${path.module}/../../../../files/authorized_keys/ops_team", {}),
+    nodepool       = var.nodepool,
+    lifecycle_type = "ONDEMAND",
+    kernel         = var.kernel,
+    activation_id  = var.activation_id,
+    customer_id    = var.customer_id
+  })))
+
+  block_device_mappings {
+    device_name = "/dev/xvda"
+    ebs {
+      volume_size = var.nodepool_worker_drive_size
+    }
+  }
+
+  tag_specifications {
+    resource_type = "instance"
+    tags = {
+      Name = "eks-${var.vpc_name}-${var.nodepool}"
+    }
   }
 
   lifecycle {
     create_before_destroy = true
-    #ignore_changes = [user_data_base64]
   }
 }
 
+
 resource "aws_autoscaling_group" "eks_autoscaling_group" {
   desired_capacity      = var.nodepool_asg_desired_capacity
   protect_from_scale_in = var.scale_in_protection
-  launch_configuration  = aws_launch_configuration.eks_launch_configuration.id
   max_size              = var.nodepool_asg_max_size
   min_size              = var.nodepool_asg_min_size
   name                  = "eks-${var.nodepool}worker-node-${var.vpc_name}"
   vpc_zone_identifier   = flatten([var.eks_private_subnets])
 
+  launch_template {
+    id      = aws_launch_template.eks_launch_template.id
+    version = "$Latest"
+  }
+
   tag {
     key   = "Environment"
     value = var.vpc_name
diff --git a/tf_files/aws/modules/eks/asg.tf b/tf_files/aws/modules/eks/asg.tf
index ee2b0cd6..91200a86 100644
--- a/tf_files/aws/modules/eks/asg.tf
+++ b/tf_files/aws/modules/eks/asg.tf
@@ -14,24 +14,49 @@
 # See template.tf for more information about the bootstrap script
-resource "aws_launch_configuration" "eks_launch_configuration" { - count = var.use_asg ? 1 : 0 - associate_public_ip_address = false - iam_instance_profile = aws_iam_instance_profile.eks_node_instance_profile.name - image_id = local.ami - instance_type = var.instance_type - name_prefix = "eks-${var.vpc_name}" - security_groups = [aws_security_group.eks_nodes_sg.id, aws_security_group.ssh.id] - user_data_base64 = sensitive(base64encode(templatefile("${path.module}/../../../../flavors/eks/${var.bootstrap_script}", {eks_ca = aws_eks_cluster.eks_cluster.certificate_authority.0.data, eks_endpoint = aws_eks_cluster.eks_cluster.endpoint, eks_region = data.aws_region.current.name, vpc_name = var.vpc_name, ssh_keys = templatefile("${path.module}/../../../../files/authorized_keys/ops_team", {}), nodepool = "default", lifecycle_type = "ONDEMAND", activation_id = var.activation_id, customer_id = var.customer_id}))) - key_name = var.ec2_keyname - - root_block_device { - volume_size = var.worker_drive_size +resource "aws_launch_template" "eks_launch_template" { + name_prefix = "eks-${var.vpc_name}" + image_id = local.ami + instance_type = var.instance_type + key_name = var.ec2_keyname + + iam_instance_profile { + name = aws_iam_instance_profile.eks_node_instance_profile.name + } + + network_interfaces { + associate_public_ip_address = false + security_groups = [aws_security_group.eks_nodes_sg.id, aws_security_group.ssh.id] + } + + user_data = sensitive(base64encode(templatefile("${path.module}/../../../../flavors/eks/${var.bootstrap_script}", { + eks_ca = aws_eks_cluster.eks_cluster.certificate_authority[0].data + eks_endpoint = aws_eks_cluster.eks_cluster.endpoint + eks_region = data.aws_region.current.name + vpc_name = var.vpc_name + ssh_keys = templatefile("${path.module}/../../../../files/authorized_keys/ops_team", {}) + nodepool = "default" + lifecycle_type = "ONDEMAND" + activation_id = var.activation_id + customer_id = var.customer_id + }))) + + block_device_mappings { + device_name = "/dev/xvda" + ebs { + volume_size = var.worker_drive_size + } + } + + tag_specifications { + resource_type = "instance" + tags = { + Name = "eks-${var.vpc_name}-node" + } } lifecycle { create_before_destroy = true - #ignore_changes = ["user_data_base64"] } } @@ -39,12 +64,16 @@ resource "aws_autoscaling_group" "eks_autoscaling_group" { count = var.use_asg ? 
   service_linked_role_arn = aws_iam_service_linked_role.autoscaling.arn
   desired_capacity        = 2
-  launch_configuration    = aws_launch_configuration.eks_launch_configuration[0].id
   max_size                = 10
   min_size                = 2
   name                    = "eks-worker-node-${var.vpc_name}"
   vpc_zone_identifier     = flatten([aws_subnet.eks_private.*.id])
 
+  launch_template {
+    id      = aws_launch_template.eks_launch_template.id
+    version = "$Latest"
+  }
+
   tag {
     key   = "Environment"
     value = var.vpc_name
@@ -81,7 +110,6 @@ resource "aws_autoscaling_group" "eks_autoscaling_group" {
     propagate_at_launch = true
   }
 
-# Avoid unnecessary changes for existing commons running on EKS
   lifecycle {
     ignore_changes = [desired_capacity, max_size, min_size]
   }
diff --git a/tf_files/aws/modules/squid_auto/cloud.tf b/tf_files/aws/modules/squid_auto/cloud.tf
index 9c2a813b..0ad41850 100644
--- a/tf_files/aws/modules/squid_auto/cloud.tf
+++ b/tf_files/aws/modules/squid_auto/cloud.tf
@@ -68,16 +68,22 @@ resource "aws_route_table_association" "squid_auto0" {
 
 # Auto scaling group for squid auto
 
+resource "aws_launch_template" "squid_auto" {
+  name_prefix   = "${var.env_squid_name}-lt"
+  instance_type = var.squid_instance_type
+  image_id      = data.aws_ami.public_squid_ami.id
+  key_name      = var.ssh_key_name
+
+  iam_instance_profile {
+    name = aws_iam_instance_profile.squid-auto_role_profile.name
+  }
+
+  network_interfaces {
+    associate_public_ip_address = true
+    security_groups             = [aws_security_group.squidauto_in.id, aws_security_group.squidauto_out.id]
+  }
 
-resource "aws_launch_configuration" "squid_auto" {
-  name_prefix                 = "${var.env_squid_name}_autoscaling_launch_config"
-  image_id                    = data.aws_ami.public_squid_ami.id
-  instance_type               = var.squid_instance_type
-  security_groups             = [aws_security_group.squidauto_in.id, aws_security_group.squidauto_out.id]
-  key_name                    = var.ssh_key_name
-  iam_instance_profile        = aws_iam_instance_profile.squid-auto_role_profile.id
-  associate_public_ip_address = true
-  user_data                   = <<EOF
[... removed user_data script garbled in source; surviving tail: ...]
...> /var/log/bootstrapping_script_part2.log
--BOUNDARY--
EOF
[... added user_data (base64-encoded heredoc) garbled in source; only its closing survives ...]
+  ))
+
+  block_device_mappings {
+    device_name = "/dev/xvda"
+    ebs {
+      volume_size = var.squid_instance_drive_size
+    }
+  }
-  root_block_device {
-    volume_size = var.squid_instance_drive_size
+
+  tag_specifications {
+    resource_type = "instance"
+    tags = {
+      Name = "${var.env_squid_name}"
+    }
   }
 
   lifecycle {
     create_before_destroy = true
   }
-
-  depends_on = [aws_iam_instance_profile.squid-auto_role_profile]
 }
 
 resource "null_resource" "service_depends_on" {
@@ -223,9 +238,13 @@ resource "aws_autoscaling_group" "squid_auto" {
   max_size            = var.cluster_max_size
   min_size            = var.cluster_min_size
   vpc_zone_identifier = aws_subnet.squid_pub0.*.id
-  launch_configuration = aws_launch_configuration.squid_auto.name
   depends_on          = [null_resource.service_depends_on, aws_route_table_association.squid_auto0]
 
+  launch_template {
+    id      = aws_launch_template.squid_auto.id
+    version = "$Latest"
+  }
+
   tag {
     key   = "Name"
     value = "${var.env_squid_name}-grp-member"
diff --git a/tf_files/aws/modules/squid_nlb_central_csoc/cloud.tf b/tf_files/aws/modules/squid_nlb_central_csoc/cloud.tf
index 478ae0d7..12584877 100644
--- a/tf_files/aws/modules/squid_nlb_central_csoc/cloud.tf
+++ b/tf_files/aws/modules/squid_nlb_central_csoc/cloud.tf
@@ -206,19 +206,25 @@ resource "aws_vpc_endpoint_service" "squid_nlb" {
 }
 
 # Auto scaling group for squid nlb
-resource "aws_launch_configuration" "squid_nlb" {
-  name_prefix                 = "${var.env_nlb_name}_autoscaling_launch_config"
-  image_id                    = data.aws_ami.public_squid_ami.id
-  instance_type               = "t3.xlarge"
-  security_groups             = [aws_security_group.squidnlb_in.id, aws_security_group.squidnlb_out.id]
-  key_name                    = var.ssh_key_name
-  iam_instance_profile        = aws_iam_instance_profile.squid-nlb_role_profile.id
-  associate_public_ip_address = true
-  depends_on                  = [aws_iam_instance_profile.squid-nlb_role_profile]
-  user_data                   = <<EOF
[... removed user_data script garbled in source; surviving tail: ...]
...2>&1 |sudo tee --append /var/log/bootstrapping_script.log
EOF
-
-  root_block_device {
-    volume_size = 30
[... opening of the added aws_launch_template "squid_nlb" resource and its user_data heredoc garbled in source ...]
+  ))
+  block_device_mappings {
+    device_name = "/dev/xvda"
+    ebs {
+      volume_size = 30
+    }
   }
 
   lifecycle {
     create_before_destroy = true
   }
+
+  depends_on = [aws_iam_instance_profile.squid-nlb_role_profile]
 }
 
 resource "aws_autoscaling_group" "squid_nlb" {
@@ -261,7 +272,11 @@ resource "aws_autoscaling_group" "squid_nlb" {
   min_size            = 1
   target_group_arns   = [aws_lb_target_group.squid_nlb-http.arn, aws_lb_target_group.squid_nlb-sftp.arn]
   vpc_zone_identifier = [aws_subnet.squid_pub0.id, aws_subnet.squid_pub1.id, aws_subnet.squid_pub2.id, aws_subnet.squid_pub3.id, aws_subnet.squid_pub4.id, aws_subnet.squid_pub5.id]
-  launch_configuration = aws_launch_configuration.squid_nlb.name
+
+  launch_template {
+    id      = aws_launch_template.squid_nlb.id
+    version = "$Latest"
+  }
 
   tag {
     key = "Name"
diff --git a/tf_files/aws/modules/squidnlb/cloud.tf b/tf_files/aws/modules/squidnlb/cloud.tf
index ff66d2e2..945f0af8 100644
--- a/tf_files/aws/modules/squidnlb/cloud.tf
+++ b/tf_files/aws/modules/squidnlb/cloud.tf
@@ -148,16 +148,22 @@ resource "aws_lb_listener" "squid_nlb-sftp" {
 }
 
 # Auto scaling group for squid nlb
-resource "aws_launch_configuration" "squid_nlb" {
-  name_prefix                 = "${var.env_nlb_name}_autoscaling_launch_config"
-  image_id                    = data.aws_ami.public_squid_ami.id
-  instance_type               = "t2.medium"
-  security_groups             = [aws_security_group.squidnlb_in.id, aws_security_group.squidnlb_out.id]
-  key_name                    = var.ssh_key_name
-  iam_instance_profile        = aws_iam_instance_profile.squid-nlb_role_profile.id
-  associate_public_ip_address = true
-  depends_on                  = [aws_iam_instance_profile.squid-nlb_role_profile]
-  user_data                   = <<EOF
[... removed user_data script garbled in source; surviving tail: ...]
...2>&1 |sudo tee --append /var/log/bootstrapping_script.log
EOF
[... opening of the added aws_launch_template "squid_nlb" resource garbled in source ...]
+  ))
+
+  block_device_mappings {
+    device_name = "/dev/xvda"
+    ebs {
+      volume_size = 30
+    }
+  }
 
   lifecycle {
     create_before_destroy = true
   }
+
+  depends_on = [aws_iam_instance_profile.squid-nlb_role_profile]
 }
+
 resource "aws_autoscaling_group" "squid_nlb" {
   name = "${var.env_nlb_name}_autoscaling_grp"
 #If you define a list of subnet IDs split across the desired availability zones set them using vpc_zone_identifier
@@ -197,7 +214,11 @@ resource "aws_autoscaling_group" "squid_nlb" {
   min_size            = 1
   target_group_arns   = [aws_lb_target_group.squid_nlb-http.arn, aws_lb_target_group.squid_nlb-sftp.arn]
   vpc_zone_identifier = [aws_subnet.squid_pub0.id, aws_subnet.squid_pub1.id, aws_subnet.squid_pub2.id]
-  launch_configuration = aws_launch_configuration.squid_nlb.name
+
+  launch_template {
+    id      = aws_launch_template.squid_nlb.id
+    version = "$Latest"
+  }
 
   tag {
     key = "Name"
diff --git a/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf b/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf
index 86f91489..c1012e70 100644
--- a/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf
+++ b/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf
@@ -148,16 +148,22 @@ resource "aws_lb_listener" "vpn_nlb-ssh" {
 }
 
 # Auto scaling group for VPN nlb
-resource "aws_launch_configuration" "vpn_nlb" {
-  name_prefix                 = "${var.env_vpn_nlb_name}_autoscaling_launch_config"
-  image_id                    = data.aws_ami.public_vpn_ami.id
-  instance_type               = "m5.xlarge"
-  security_groups             = [aws_security_group.vpnnlb_in.id, aws_security_group.vpnnlb_out.id]
-  key_name                    = var.ssh_key_name
-  iam_instance_profile        = aws_iam_instance_profile.vpn-nlb_role_profile.id
-  associate_public_ip_address = true
-  depends_on                  = [aws_iam_instance_profile.vpn-nlb_role_profile]
-  user_data                   = <<EOF
[... removed user_data script garbled in source; surviving tail: ...]
...> /var/log/bootstrapping_script.log
EOF
[... opening of the added aws_launch_template "vpn_nlb" resource garbled in source ...]
+  ))
-root_block_device {
-  volume_size = 30
-}
+  block_device_mappings {
+    device_name = "/dev/xvda"
+    ebs {
+      volume_size = 30
+    }
+  }
-lifecycle {
+  lifecycle {
     create_before_destroy = true
   }
+
+  depends_on = [aws_iam_instance_profile.vpn-nlb_role_profile]
 }
 
 resource "aws_autoscaling_group" "vpn_nlb" {
@@ -217,7 +229,11 @@ resource "aws_autoscaling_group" "vpn_nlb" {
   min_size            = 1
   target_group_arns   = [aws_lb_target_group.vpn_nlb-tcp.arn,aws_lb_target_group.vpn_nlb-qr.arn,aws_lb_target_group.vpn_nlb-ssh.arn]
   vpc_zone_identifier = aws_subnet.vpn_pub0.*.id
-  launch_configuration = aws_launch_configuration.vpn_nlb.name
+
+  launch_template {
+    id      = aws_launch_template.vpn_nlb.id
+    version = "$Latest"
+  }
 
   tag {
     key = "Name"