diff --git a/install/terraform/aws/backend.tf b/install/terraform/aws/backend.tf
new file mode 100644
index 00000000..91722a40
--- /dev/null
+++ b/install/terraform/aws/backend.tf
@@ -0,0 +1,14 @@
+terraform {
+  backend "s3" {
+    # bucket = "243019462621-terraform-state"
+    # key    = "primary/us-west-2/substratus/terraform.tfstate"
+    # region = "us-west-2"
+  }
+}
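+
+# A minimal sketch of initializing this backend (values are illustrative
+# placeholders, not part of this PR):
+#   terraform init \
+#     -backend-config="bucket=<your-state-bucket>" \
+#     -backend-config="key=primary/us-west-2/substratus/terraform.tfstate" \
+#     -backend-config="region=us-west-2"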
diff --git a/install/terraform/aws/bucket.tf b/install/terraform/aws/bucket.tf
new file mode 100644
index 00000000..5c4200f7
--- /dev/null
+++ b/install/terraform/aws/bucket.tf
@@ -0,0 +1,6 @@
+data "aws_caller_identity" "current" {}
+
+resource "aws_s3_bucket" "artifacts" {
+  count  = var.existing_artifacts_bucket == null ? 1 : 0
+  bucket = "${data.aws_caller_identity.current.account_id}-${var.name_prefix}-artifacts"
+}
diff --git a/install/terraform/aws/common.tf b/install/terraform/aws/common.tf
new file mode 100644
index 00000000..89f2f898
--- /dev/null
+++ b/install/terraform/aws/common.tf
@@ -0,0 +1,24 @@
+locals {
+  # passed to eks_cluster.tf
+  vpc = {
+    id                 = var.existing_vpc == null ? module.vpc[0].vpc_id : var.existing_vpc.id
+    private_subnet_ids = var.existing_vpc == null ? module.vpc[0].private_subnets : var.existing_vpc.private_subnet_ids
+    intra_subnet_ids   = var.existing_vpc == null ? module.vpc[0].intra_subnets : var.existing_vpc.intra_subnet_ids
+  }
+
+  # passed to substratus_irsa_iam_roles.tf and eks_irsa_iam_roles.tf
+  eks_cluster = {
+    name                       = var.existing_eks_cluster == null ? module.eks[0].cluster_name : var.existing_eks_cluster.name
+    oidc_provider_arn          = var.existing_eks_cluster == null ? module.eks[0].oidc_provider_arn : var.existing_eks_cluster.oidc_provider_arn
+    managed_node_groups        = var.existing_eks_cluster == null ? module.eks[0].eks_managed_node_groups : null
+    certificate_authority_data = var.existing_eks_cluster == null ? module.eks[0].cluster_certificate_authority_data : ""
+    endpoint                   = var.existing_eks_cluster == null ? module.eks[0].cluster_endpoint : ""
+  }
+
+  artifacts_bucket = {
+    arn = var.existing_artifacts_bucket == null ? aws_s3_bucket.artifacts[0].arn : var.existing_artifacts_bucket.arn
+    id  = var.existing_artifacts_bucket == null ? aws_s3_bucket.artifacts[0].id : var.existing_artifacts_bucket.id
+  }
+
+  ecr_repository_arn = var.existing_ecr_repository_arn == "" ? aws_ecr_repository.main[0].arn : var.existing_ecr_repository_arn
+}
diff --git a/install/terraform/aws/container_registry.tf b/install/terraform/aws/container_registry.tf
new file mode 100644
index 00000000..44e59616
--- /dev/null
+++ b/install/terraform/aws/container_registry.tf
@@ -0,0 +1,8 @@
+resource "aws_ecr_repository" "main" {
+  count                = var.existing_ecr_repository_arn == "" ? 1 : 0
+  name                 = var.name_prefix
+  image_tag_mutability = "MUTABLE"
+  image_scanning_configuration {
+    scan_on_push = var.image_scan_on_push
+  }
+}
diff --git a/install/terraform/aws/eks_cluster.tf b/install/terraform/aws/eks_cluster.tf
new file mode 100644
index 00000000..b573b7d4
--- /dev/null
+++ b/install/terraform/aws/eks_cluster.tf
@@ -0,0 +1,244 @@
+locals {
+  create_cluster = var.existing_eks_cluster == null ? 1 : 0
+  # We need to look up the K8s taint effect from the AWS API value
+  taint_effects = {
+    NO_SCHEDULE        = "NoSchedule"
+    NO_EXECUTE         = "NoExecute"
+    PREFER_NO_SCHEDULE = "PreferNoSchedule"
+  }
+
+  # The following locals are used to configure tags for the EKS cluster's Auto
+  # Scaling Groups managed by the cluster autoscaler.
+
+  # `cluster_autoscaler_label_tags` contains the tags related to the Kubernetes
+  # labels applied to the nodes in the cluster's managed node groups.
+  # Each tag has a key formed from the node group's name and label name, and a
+  # value containing the autoscaling group's name, the corresponding
+  # Kubernetes label key, and its value. These tags are used by the cluster
+  # autoscaler to determine how nodes should be scaled based on their labels.
+  cluster_autoscaler_label_tags = local.eks_cluster.managed_node_groups != null ? merge([
+    for name, group in local.eks_cluster.managed_node_groups : {
+      for label_name, label_value in coalesce(group.node_group_labels, {}) : "${name}|label|${label_name}" => {
+        autoscaling_group = group.node_group_autoscaling_group_names[0],
+        key               = "k8s.io/cluster-autoscaler/node-template/label/${label_name}",
+        value             = label_value,
+      }
+    }
+  ]...) : {}
+
+  # `cluster_autoscaler_taint_tags` contains tags related to the Kubernetes
+  # taints applied to the nodes in the cluster's managed node groups.
+  # Each tag's key includes the node group's name and taint key, and its value
+  # contains information about the taint, such as its value and effect.
+  # These tags allow the cluster autoscaler to respect the taints when scaling nodes.
+  cluster_autoscaler_taint_tags = local.eks_cluster.managed_node_groups != null ? merge([
+    for name, group in local.eks_cluster.managed_node_groups : {
+      for taint in coalesce(group.node_group_taints, []) : "${name}|taint|${taint.key}" => {
+        autoscaling_group = group.node_group_autoscaling_group_names[0],
+        key               = "k8s.io/cluster-autoscaler/node-template/taint/${taint.key}"
+        value             = "${taint.value}:${local.taint_effects[taint.effect]}"
+      }
+    }
+  ]...) : {}
+
+  # `cluster_autoscaler_asg_tags` combines the above label and taint tags into a
+  # single map, which is then used to create the actual tags on the AWS ASGs
+  # through the `aws_autoscaling_group_tag` resource. The tags are only applied
+  # if `existing_eks_cluster` is `null`, ensuring they are only created for new
+  # clusters.
+  cluster_autoscaler_asg_tags = merge(
+    local.cluster_autoscaler_label_tags,
+    local.cluster_autoscaler_taint_tags
+  )
+}
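+
+# An illustrative (hypothetical) expansion: a node group "gpu" whose nodes
+# carry the label `nvidia.com/gpu = "true"` yields an entry keyed
+# "gpu|label|nvidia.com/gpu" with:
+#   key   = "k8s.io/cluster-autoscaler/node-template/label/nvidia.com/gpu"
+#   value = "true"
+# which the `aws_autoscaling_group_tag` resource below writes onto its ASG.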
+
+data "aws_ec2_instance_types" "gpu" {
+  filter {
+    name = "instance-type"
+    # from: aws ec2 describe-instance-types --region us-west-2 --query "InstanceTypes[?GpuInfo!=null].InstanceType" --output json | jq -r '.[]' | awk -F. '{print "\"" $1 ".*\","}' | uniq
+    # types without CUDA support are commented out for now, though they do have accelerators of some kind
+    values = [
+      # "dl1.*",  # no CUDA support
+      # "inf1.*", # no CUDA support
+      # "inf2.*", # no CUDA support
+      "g2.*",
+      "g3.*",
+      "g3s.*",
+      "g4ad.*",
+      "g4dn.*",
+      "g5.*",
+      # "g5g.*", # excluded: these are ARM (Graviton) machines
+      "p2.*",
+      "p3.*",
+      "p3dn.*",
+      "p4d.*",
+      # "p5.*",     # excluded for now
+      # "trn1.*",   # no CUDA support
+      # "trn1n32.*" # no CUDA support
+    ]
+  }
+}
+
+data "aws_ami" "eks_default" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "name"
+    values = ["amazon-eks-node-${var.cluster_version}-v*"]
+  }
+  filter {
+    name   = "architecture"
+    values = ["x86_64"]
+  }
+}
+
+data "aws_ami" "deep_learning" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name = "name"
+    # they don't produce images on any Ubuntu OS newer than this :shrug:
+    values = ["Deep Learning AMI (Ubuntu 18.04) Version ??.?"]
+  }
+  filter {
+    name   = "architecture"
+    values = ["x86_64"]
+  }
+
+  filter {
+    name   = "state"
+    values = ["available"]
+  }
+}
+
+module "eks" {
+  count                          = local.create_cluster
+  source                         = "terraform-aws-modules/eks/aws"
+  version                        = "19.15.4"
+  cluster_name                   = var.name_prefix
+  cluster_version                = var.cluster_version
+  cluster_endpoint_public_access = true
+  cluster_ip_family              = "ipv4"
+  vpc_id                         = local.vpc.id
+  subnet_ids                     = local.vpc.private_subnet_ids
+  control_plane_subnet_ids       = local.vpc.intra_subnet_ids
+  manage_aws_auth_configmap      = true
+
+  eks_managed_node_group_defaults = {
+    # We are using the IRSA created below for permissions. However, we have to
+    # deploy with the policy attached FIRST (when creating a fresh cluster) and
+    # then turn this off after the cluster/node group is created. Without this
+    # initial policy, the VPC CNI fails to assign IPs and nodes cannot join the
+    # cluster. See https://github.com/aws/containers-roadmap/issues/1666.
+    iam_role_attach_cni_policy = true
+    subnet_ids                 = local.vpc.private_subnet_ids
+    labels                     = var.labels
+    ebs_optimized              = true
+    disable_api_termination    = false
+    enable_monitoring          = true
+    # By default, the module creates a launch template (to propagate tags to
+    # instances, etc.); disable that to use the EKS-managed default template.
+    use_custom_launch_template = false
+    force_update_version       = true
+  }
+
+  eks_managed_node_groups = {
+    builder = {
+      name           = "container-builder"
+      ami_id         = data.aws_ami.eks_default.image_id
+      disk_size      = 100
+      min_size       = 1
+      max_size       = 3
+      desired_size   = 1
+      instance_types = [
+        "t3a.large"
+      ]
+      capacity_type       = "SPOT"
+      local_storage_types = ["ssd"]
+      block_device_mappings = {
+        xvda = {
+          device_name = "/dev/xvda"
+          ebs = {
+            volume_size           = 100
+            volume_type           = "gp3"
+            iops                  = 3000
+            throughput            = 150
+            encrypted             = true
+            delete_on_termination = true
+          }
+        }
+      }
+    }
+
+    gpu = {
+      name         = "gpu"
+      description  = "GPU node launch template"
+      min_size     = 0
+      max_size     = 32
+      desired_size = 0
+
+      ami_id         = data.aws_ami.deep_learning.image_id
+      capacity_type  = "SPOT"
+      instance_types = sort(data.aws_ec2_instance_types.gpu.instance_types)
+
+      update_config = {
+        max_unavailable_percentage = 100
+      }
+
+      local_storage_types = ["ssd"]
+      block_device_mappings = {
+        xvda = {
+          device_name = "/dev/xvda"
+          ebs = {
+            volume_size           = 100
+            volume_type           = "gp3"
+            iops                  = 3000
+            throughput            = 150
+            encrypted             = true
+            delete_on_termination = true
+          }
+        }
+      }
+
+      metadata_options = {
+        http_endpoint          = "enabled"
+        http_tokens            = "required"
+        instance_metadata_tags = "disabled"
+      }
+
+      create_iam_role          = true
+      iam_role_name            = "eks-managed-gpu-node-group"
+      iam_role_use_name_prefix = false
+      iam_role_description     = "EKS managed GPU node group"
+      iam_role_tags = {
+        Purpose = "Protector of the kubelet"
+      }
+      iam_role_additional_policies = {
+        AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+      }
+    }
+  }
+  tags = var.tags
+}
+
+# ASG tags are needed for the cluster autoscaler to honor the labels and
+# taints of the node groups
+resource "aws_autoscaling_group_tag" "cluster_autoscaler_asg_tags" {
+  for_each               = var.existing_eks_cluster == null ? local.cluster_autoscaler_asg_tags : {}
+  autoscaling_group_name = each.value.autoscaling_group
+
+  tag {
+    key                 = each.value.key
+    value               = each.value.value
+    propagate_at_launch = false
+  }
+}
diff --git a/install/terraform/aws/eks_irsa_iam_roles.tf b/install/terraform/aws/eks_irsa_iam_roles.tf
new file mode 100644
index 00000000..ab788e60
--- /dev/null
+++ b/install/terraform/aws/eks_irsa_iam_roles.tf
@@ -0,0 +1,95 @@
+# EKS specific IRSA Roles
+
+# Note: these are currently unused but should be adopted as we install the
+# associated add-ons (however we decide to do that)
+module "cluster_autoscaler_irsa_role" {
+  count   = local.create_cluster
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix                 = "cluster-autoscaler"
+  attach_cluster_autoscaler_policy = true
+  cluster_autoscaler_cluster_names = [local.eks_cluster.name]
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:cluster-autoscaler"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "ebs_csi_irsa_role" {
+  count   = local.create_cluster
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix      = "ebs-csi"
+  attach_ebs_csi_policy = true
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "load_balancer_controller_irsa_role" {
+  count   = local.create_cluster
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix                       = "load-balancer-controller"
+  attach_load_balancer_controller_policy = true
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:aws-load-balancer-controller"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "node_termination_handler_irsa_role" {
+  count   = local.create_cluster
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix                       = "node-termination-handler"
+  attach_node_termination_handler_policy = true
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:aws-node"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "vpc_cni_ipv4_irsa_role" {
+  count   = local.create_cluster
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix      = "vpc-cni-ipv4"
+  attach_vpc_cni_policy = true
+  vpc_cni_enable_ipv4   = true
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:aws-node"]
+    }
+  }
+
+  tags = var.tags
+}
diff --git a/install/terraform/aws/outputs.tf b/install/terraform/aws/outputs.tf
new file mode 100644
index 00000000..7846c2d8
--- /dev/null
+++ b/install/terraform/aws/outputs.tf
@@ -0,0 +1,37 @@
+output "artifacts_bucket" {
+  value = {
+    arn = local.artifacts_bucket.arn
+    id  = local.artifacts_bucket.id
+  }
+}
+
+output "cluster_name" {
+  value = local.eks_cluster.name
+}
+
+output "cluster_region" {
+  value = var.region
+}
+
+output "cluster" {
+  value = {
+    name              = local.eks_cluster.name
+    oidc_provider_arn = local.eks_cluster.oidc_provider_arn
+  }
+}
+
+output "ecr_repository_arn" {
+  value = local.ecr_repository_arn
+}
+
+output "vpc" {
+  value = {
+    id                 = local.vpc.id
+    private_subnet_ids = local.vpc.private_subnet_ids
+    intra_subnet_ids   = local.vpc.intra_subnet_ids
+  }
+}
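+
+# Example (illustrative) of consuming these outputs from the CLI:
+#   terraform output -raw cluster_name
+#   terraform output -json vpc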
diff --git a/install/terraform/aws/providers.tf b/install/terraform/aws/providers.tf
new file mode 100644
index 00000000..808daadc
--- /dev/null
+++ b/install/terraform/aws/providers.tf
@@ -0,0 +1,19 @@
+provider "aws" {
+  region = var.region
+}
+
+provider "kubernetes" {
+  host                   = local.eks_cluster.endpoint
+  cluster_ca_certificate = base64decode(local.eks_cluster.certificate_authority_data)
+
+  exec {
+    api_version = "client.authentication.k8s.io/v1beta1"
+    command     = "aws"
+    # This requires the awscli to be installed locally where Terraform is executed
+    args = ["eks", "get-token", "--cluster-name", local.eks_cluster.name]
+  }
+}
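+
+# Quick sanity check that the exec auth works (cluster name shown is the
+# default var.name_prefix; adjust to yours):
+#   aws eks get-token --cluster-name substratus-usw2 --region us-west-2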
diff --git a/install/terraform/aws/substratus_irsa_iam_roles.tf b/install/terraform/aws/substratus_irsa_iam_roles.tf
new file mode 100644
index 00000000..e869b1a4
--- /dev/null
+++ b/install/terraform/aws/substratus_irsa_iam_roles.tf
@@ -0,0 +1,198 @@
+resource "aws_iam_policy" "ecr_writer" {
+  count       = var.create_substratus_irsa_roles == true ? 1 : 0
+  name        = "${var.name_prefix}-ecr-writer"
+  description = "A policy allowing full access to the project's ECR repository"
+
+  policy = jsonencode({
+    "Version" : "2012-10-17",
+    "Statement" : [
+      {
+        "Effect" : "Allow",
+        "Action" : [
+          "ecr:*"
+        ],
+        "Resource" : local.ecr_repository_arn
+      }
+    ]
+  })
+
+  tags = var.tags
+}
+
+resource "aws_iam_policy" "s3_full_bucket_access" {
+  count       = var.create_substratus_irsa_roles == true ? 1 : 0
+  name        = "${var.name_prefix}-AmazonS3FullAccess"
+  description = "A policy allowing full access to the ${local.artifacts_bucket.id} bucket"
+
+  policy = jsonencode({
+    "Version" : "2012-10-17",
+    "Statement" : [
+      {
+        "Effect" : "Allow",
+        "Action" : [
+          "s3:*",
+          "s3-object-lambda:*"
+        ],
+        "Resource" : [
+          "${local.artifacts_bucket.arn}",
+          "${local.artifacts_bucket.arn}/*",
+        ]
+      }
+    ]
+  })
+
+  tags = var.tags
+}
+
+resource "aws_iam_policy" "s3_readonly_bucket_access" {
+  count       = var.create_substratus_irsa_roles == true ? 1 : 0
+  name        = "${var.name_prefix}-AmazonS3ReadOnlyAccess"
+  description = "A policy allowing read-only access to the ${local.artifacts_bucket.id} bucket"
+
+  policy = jsonencode({
+    "Version" : "2012-10-17",
+    "Statement" : [
+      {
+        "Effect" : "Allow",
+        "Action" : [
+          "s3:Get*",
+          "s3:List*",
+          "s3-object-lambda:Get*",
+          "s3-object-lambda:List*"
+        ],
+        "Resource" : [
+          "${local.artifacts_bucket.arn}",
+          "${local.artifacts_bucket.arn}/*",
+        ]
+      }
+    ]
+  })
+
+  tags = var.tags
+}
+
+module "container_builder_irsa" {
+  count   = var.create_substratus_irsa_roles == true ? 1 : 0
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix = "${var.name_prefix}-container-builder-"
+  role_policy_arns = {
+    ECRWriter                        = aws_iam_policy.ecr_writer[0].arn
+    SubstratusAmazonS3ReadOnlyAccess = aws_iam_policy.s3_readonly_bucket_access[0].arn
+  }
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["default:container-builder"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "modeller_irsa" {
+  count   = var.create_substratus_irsa_roles == true ? 1 : 0
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix = "${var.name_prefix}-modeller-"
+  role_policy_arns = {
+    SubstratusAmazonS3FullAccess = aws_iam_policy.s3_full_bucket_access[0].arn
+  }
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["default:modeller"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "model_server_irsa" {
+  count   = var.create_substratus_irsa_roles == true ? 1 : 0
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix = "${var.name_prefix}-model-server-"
+  role_policy_arns = {
+    SubstratusAmazonS3FullAccess = aws_iam_policy.s3_full_bucket_access[0].arn
+  }
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["default:model-server"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "notebook_irsa" {
+  count   = var.create_substratus_irsa_roles == true ? 1 : 0
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix = "${var.name_prefix}-notebook-"
+  role_policy_arns = {
+    SubstratusAmazonS3FullAccess = aws_iam_policy.s3_full_bucket_access[0].arn
+  }
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["default:notebook"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "data_loader_irsa" {
+  count   = var.create_substratus_irsa_roles == true ? 1 : 0
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix = "${var.name_prefix}-data-loader-"
+  role_policy_arns = {
+    SubstratusAmazonS3FullAccess = aws_iam_policy.s3_full_bucket_access[0].arn
+  }
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["default:data-loader"]
+    }
+  }
+
+  tags = var.tags
+}
+
+module "aws_manager_irsa" {
+  count   = var.create_substratus_irsa_roles == true ? 1 : 0
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.28"
+
+  role_name_prefix = "${var.name_prefix}-aws-manager-"
+  role_policy_arns = {
+    SubstratusAmazonS3FullAccess = aws_iam_policy.s3_full_bucket_access[0].arn
+  }
+
+  oidc_providers = {
+    main = {
+      provider_arn               = local.eks_cluster.oidc_provider_arn
+      namespace_service_accounts = ["substratus:aws-manager"]
+    }
+  }
+
+  tags = var.tags
+}
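+
+# Note: IRSA only takes effect once the matching ServiceAccount carries the
+# role-arn annotation, e.g. (illustrative):
+#   kubectl annotate serviceaccount -n default notebook \
+#     eks.amazonaws.com/role-arn=<role ARN from module.notebook_irsa>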
diff --git a/install/terraform/aws/variables.tf b/install/terraform/aws/variables.tf
new file mode 100644
index 00000000..8fcd7157
--- /dev/null
+++ b/install/terraform/aws/variables.tf
@@ -0,0 +1,106 @@
+variable "cluster_version" {
+  description = "The version of the EKS cluster to deploy (used only when var.existing_eks_cluster is null)"
+  type        = string
+  default     = "1.27"
+}
+
+variable "create_substratus_irsa_roles" {
+  description = "A boolean controlling the creation of substratus IRSA roles"
+  type        = bool
+  default     = true
+}
+
+variable "existing_artifacts_bucket" {
+  description = "An existing artifacts bucket to use for this substratus install"
+  type = object({
+    id  = string
+    arn = string
+  })
+  default = null
+}
+
+variable "existing_ecr_repository_arn" {
+  description = "The ARN of an existing ECR repository to use instead of creating a new one"
+  type        = string
+  default     = ""
+}
+
+variable "existing_eks_cluster" {
+  description = "An existing EKS cluster to add substratus components to"
+  type = object({
+    name              = string
+    oidc_provider_arn = string
+  })
+  default = null
+}
+
+variable "existing_vpc" {
+  description = "An existing VPC to add substratus components to"
+  type = object({
+    id                 = string
+    private_subnet_ids = list(string)
+    intra_subnet_ids   = list(string)
+  })
+  default = null
+}
+
+variable "image_scan_on_push" {
+  type        = bool
+  default     = false
+  description = "Scan images for vulnerabilities on push to ECR ($0.09 per scan on push)"
+}
+
+variable "labels" {
+  description = "Kubernetes labels applied to nodes in the EKS managed node groups"
+  type        = map(string)
+  default = {
+    GithubRepo = "substratus"
+    GithubOrg  = "substratusai"
+  }
+}
+
+variable "name_prefix" {
+  description = "Prefix to use for resources"
+  type        = string
+  default     = "substratus-usw2"
+}
+
+variable "region" {
+  description = "AWS region"
+  type        = string
+  default     = "us-west-2"
+}
+
+# will remove this before pushing to substratus repo
+variable "tags" {
+  type = map(string)
+  default = {
+    GithubRepo = "infrastructure"
+    GithubOrg  = "substratusai"
+  }
+}
+
+variable "vpc_cidr" {
+  description = "The CIDR block of the VPC created by this module (used only when var.existing_vpc is null)"
+  type        = string
+  default     = "10.0.0.0/16"
+}
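+
+# Illustrative terraform.tfvars for attaching to pre-existing infrastructure
+# (all IDs/ARNs below are placeholders):
+#
+#   region                      = "us-west-2"
+#   existing_ecr_repository_arn = "arn:aws:ecr:us-west-2:111111111111:repository/substratus"
+#   existing_artifacts_bucket = {
+#     id  = "111111111111-substratus-artifacts"
+#     arn = "arn:aws:s3:::111111111111-substratus-artifacts"
+#   }
+#   existing_eks_cluster = {
+#     name              = "my-cluster"
+#     oidc_provider_arn = "arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/EXAMPLE"
+#   }
+#   existing_vpc = {
+#     id                 = "vpc-0123456789abcdef0"
+#     private_subnet_ids = ["subnet-aaa", "subnet-bbb", "subnet-ccc"]
+#     intra_subnet_ids   = ["subnet-ddd", "subnet-eee", "subnet-fff"]
+#   }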
diff --git a/install/terraform/aws/vpc.tf b/install/terraform/aws/vpc.tf
new file mode 100644
index 00000000..32792ccb
--- /dev/null
+++ b/install/terraform/aws/vpc.tf
@@ -0,0 +1,124 @@
+data "aws_availability_zones" "available" {}
+
+locals {
+  azs        = slice(data.aws_availability_zones.available.names, 0, 3)
+  create_vpc = var.existing_vpc == null ? 1 : 0
+}
+
+module "vpc" {
+  count           = local.create_vpc
+  source          = "terraform-aws-modules/vpc/aws"
+  version         = "5.1.1"
+  name            = var.name_prefix
+  cidr            = var.vpc_cidr
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(var.vpc_cidr, 6, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(var.vpc_cidr, 6, k + 4)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(var.vpc_cidr, 6, k + 20)]
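+  # For the default 10.0.0.0/16 these expressions work out to (a sketch):
+  #   private = 10.0.0.0/22,  10.0.4.0/22,  10.0.8.0/22
+  #   public  = 10.0.16.0/22, 10.0.20.0/22, 10.0.24.0/22
+  #   intra   = 10.0.80.0/22, 10.0.84.0/22, 10.0.88.0/22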
+
+  public_subnet_ipv6_prefixes                    = [0, 1, 2]
+  public_subnet_assign_ipv6_address_on_creation  = true
+  private_subnet_ipv6_prefixes                   = [3, 4, 5]
+  private_subnet_assign_ipv6_address_on_creation = true
+  intra_subnet_ipv6_prefixes                     = [6, 7, 8]
+  intra_subnet_assign_ipv6_address_on_creation   = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+  }
+
+  create_database_subnet_group  = false
+  manage_default_network_acl    = false
+  manage_default_route_table    = false
+  manage_default_security_group = false
+
+  enable_dns_hostnames   = true
+  enable_dns_support     = true
+  enable_nat_gateway     = true
+  single_nat_gateway     = true
+  enable_ipv6            = true
+  create_egress_only_igw = true
+  enable_vpn_gateway     = false
+  enable_dhcp_options    = false
+
+  # VPC Flow Logs (disabled; if enabled, a CloudWatch log group and IAM role are created)
+  enable_flow_log                      = false
+  create_flow_log_cloudwatch_log_group = true
+  create_flow_log_cloudwatch_iam_role  = true
+  flow_log_max_aggregation_interval    = 60
+  tags                                 = var.tags
+}
+
+
+# VPC Endpoints Module
+
+module "endpoints" {
+  count                      = local.create_vpc
+  source                     = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints"
+  version                    = "5.1.1"
+  vpc_id                     = module.vpc[0].vpc_id
+  create_security_group      = true
+  security_group_name_prefix = "${var.name_prefix}-endpoints-"
+  security_group_description = "VPC endpoint security group"
+  security_group_rules = {
+    ingress_https = {
+      description = "HTTPS from VPC"
+      cidr_blocks = [module.vpc[0].vpc_cidr_block]
+    }
+  }
+
+  endpoints = {
+    s3 = {
+      service = "s3"
+      tags    = { Name = "s3-vpc-endpoint" }
+    },
+    ecr_api = {
+      service             = "ecr.api"
+      private_dns_enabled = true
+      subnet_ids          = module.vpc[0].private_subnets
+      policy              = data.aws_iam_policy_document.generic_endpoint_policy[0].json
+    },
+    ecr_dkr = {
+      service             = "ecr.dkr"
+      private_dns_enabled = true
+      subnet_ids          = module.vpc[0].private_subnets
+      policy              = data.aws_iam_policy_document.generic_endpoint_policy[0].json
+    },
+  }
+
+  tags = merge(var.tags, {
+    Endpoint = "true"
+  })
+}
+
+data "aws_iam_policy_document" "generic_endpoint_policy" {
+  count = local.create_vpc
+  statement {
+    effect    = "Deny"
+    actions   = ["*"]
+    resources = ["*"]
+
+    principals {
+      type        = "*"
+      identifiers = ["*"]
+    }
+
+    condition {
+      test     = "StringNotEquals"
+      variable = "aws:SourceVpc"
+      values   = [module.vpc[0].vpc_id]
+    }
+  }
+}
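+
+# The Deny + StringNotEquals pair above is a guardrail: any principal hitting
+# the ECR endpoints from outside this VPC is rejected, while IAM policies
+# still govern what callers inside the VPC may do.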