From 6a0ec41b734b17a837aeb3ffc1df727053ec5d2b Mon Sep 17 00:00:00 2001
From: LeoDiazL <116020815+LeoDiazL@users.noreply.github.com>
Date: Tue, 8 Aug 2023 11:44:11 -0300
Subject: [PATCH] Add VPC handling (#32)
* Initial fixes
* Small unnecessary printing fixes
* small tweak in bitops incoming
* Remove wiping of bitops_extra_env_vars
* Fixing env-merger
* Initial commit - debugging subnets fetching
* Fixing typo
* fixing indexes
* Testing #2
* Missing id
* fixing typo
* Forcing subnet for vpc
* Clearing out output bugging message
* Moving files
* Fixes
* typo
* Outputs deps fix
* fixing index
* random_int fix
* Debugging 1
* Fixes
* Fix typo
* debug
* trying a failing one
* tonumber
* +dep
* debugging
* adding debug outputs
* fixing expected value
* Cleanup
* Cleanup + moving secret manager get
* Fix expected empty value
* Code cleanup
* Adding lifecycle block to keys in sm
* Removing ugly sed command
* adding random back again
* Missing var
* Fixing efs zone issue
* Fixing efs zone mapping issue #2
Fixing output prints
* Fixing ec2_zone_mapping
* Typo in var name
* Debug EFS+VPC
* Choosing the default subnet for the VPC
* adding debugs to main
* Missing mapping of values
* Debugging summary
* Changing VPC logic
* Debugging
* typo
* Debugging 2
* Debugging 3
* fix subnet issues
* typo in resource
* breaking loop
* Fixing cycle
* typo fix
* typo fix 2
* Adding subnet def'n
* wrong conditional location
* typos fix
* Fixes
* Fixing ELB Subnet/VPC/AZ
* Fixing VPC Id in security group
* Missing file in commit
* Changing attribute name
* Commenting out AZ from ELB
* Trying to get rid of dep loop
* changing conditional order
* Changing region per zone
* Fixing AZ conditional creation
* Changing az logic #2
* Debug #55
* Trying to break loops
* Break the loop #2
* Fixing zones loop
* Missing index
* fixing outputs
* Playing with EC2 AZ's
* commenting out dep
* Cross-fixing
* changing set substract
* slice sort fix
* set
* Changed az approach again
* cleanup
* Retrying indexes
* Typo in var name
* Fixing README
* Fixing az index to ec2
* Adding comment in readme, fixing outputs
* Output fixing
* Fixing summary
* Missing "
* Debug outputs cleanup
* Debugging EFS DNS URL
* Debug 2
* Huge EFS Changes
* Fixing some ()
* Really, another '
* Fixes to vars typos and indexes
* cidr != cidr_block
* removing breaking unnecessary output
* Fixing string to list
* Testing different approach
* Missing index
* Approach #2
* typo
* Escaping var
* region-name doesn't exist
* Adding a validation
* Validation fix
* Removing filtering for only one VPC per AZ
* Adding EFS deps
* Fixing and filtering
* Cleanup
* fix
* Changing to ID
* Clearer passthrough
* Adding try
* Adding VPC def'n as target
* Making main only a target
* Making all vpc's targets
* Adding subnets?
* removing for_each
* Fixing azs
* removing count
* Fixing unnecessary count
* typo
* target vpcs and subnets
* Cleanups
* Adding dep
* typo
* option 2
* Module VPC to run first
* Adding allow-sg to aurora
* Adding missing files in commit
* dupe cleanup
* Adding missing mapping var
* fixing data source name
* Changing aurora VPC
* Fixing SG
* Cleanup and vpc target dependant
* Adding timeout and lifecycle to aurora sg
* Adding aurora db lifecycle tag
* Rollback
* Cleanup
* Cleanup
---
README.md | 52 ++++-
action.yaml | 69 +++++--
operations/_scripts/deploy/summary.sh | 48 ++++-
.../generate/generate_bitops_config.sh | 7 +-
.../generate/generate_vars_terraform.sh | 38 +++-
.../terraform/aws/aws_default_tags.tf | 12 --
.../aws/aws_default_vpc_subnet_sg.tf | 46 -----
.../terraform/aws/aws_dotenv_secretmanager.tf | 22 ---
.../deployment/terraform/aws/aws_variables.tf | 87 +++++++--
.../bitops.after-deploy.d/generate-outputs.sh | 4 -
.../deployment/terraform/aws/bitovi_main.tf | 113 ++++++-----
.../aws/ansible/aws_ansible_inventory.tf | 21 +-
.../aws/ansible/aws_ansible_inventory_vars.tf | 3 +-
.../modules/aws/aurora/aws_aurora.tf | 15 +-
.../modules/aws/aurora/aws_aurora_vars.tf | 6 +-
.../terraform/modules/aws/ec2/aws_ec2.tf | 21 +-
.../modules/aws/ec2_efs/aws_ec2_efs vars.tf | 23 ---
.../modules/aws/ec2_efs/aws_ec2_efs.tf | 50 -----
.../terraform/modules/aws/efs/aws_efs.tf | 180 ++++++++++++++++--
.../terraform/modules/aws/efs/aws_efs_vars.tf | 21 +-
.../terraform/modules/aws/elb/aws_elb.tf | 9 +-
.../terraform/modules/aws/elb/aws_elb_vars.tf | 2 +
.../aws_secretmanager_get.tf | 15 ++
.../aws_secretmanager_get_vars.tf | 1 +
.../terraform/modules/aws/vpc/aws_vpc.tf | 154 +++++++++++++++
.../aws/vpc/aws_vpc_azs.tf} | 102 +++++++---
.../terraform/modules/aws/vpc/aws_vpc_vars.tf | 15 ++
27 files changed, 801 insertions(+), 335 deletions(-)
delete mode 100644 operations/deployment/terraform/aws/aws_default_tags.tf
delete mode 100644 operations/deployment/terraform/aws/aws_default_vpc_subnet_sg.tf
delete mode 100644 operations/deployment/terraform/aws/aws_dotenv_secretmanager.tf
delete mode 100644 operations/deployment/terraform/modules/aws/ec2_efs/aws_ec2_efs vars.tf
delete mode 100644 operations/deployment/terraform/modules/aws/ec2_efs/aws_ec2_efs.tf
create mode 100644 operations/deployment/terraform/modules/aws/secretmanager_get/aws_secretmanager_get.tf
create mode 100644 operations/deployment/terraform/modules/aws/secretmanager_get/aws_secretmanager_get_vars.tf
create mode 100644 operations/deployment/terraform/modules/aws/vpc/aws_vpc.tf
rename operations/deployment/terraform/{aws/aws_default_azs.tf => modules/aws/vpc/aws_vpc_azs.tf} (58%)
create mode 100644 operations/deployment/terraform/modules/aws/vpc/aws_vpc_vars.tf
diff --git a/README.md b/README.md
index 0e0253e3..de0c95a4 100644
--- a/README.md
+++ b/README.md
@@ -53,11 +53,13 @@ jobs:
1. [AWS Specific](#aws-specific)
1. [Secrets and Environment Variables](#secrets-and-environment-variables-inputs)
1. [EC2](#ec2-inputs)
+1. [VPC](#vpc-inputs)
1. [Certificates](#certificate-inputs)
1. [Load Balancer](#load-balancer-inputs)
1. [EFS](#efs-inputs)
1. [Amazon Aurora Inputs](#aurora-inputs)
1. [Docker](#docker-inputs)
+1. [EKS](#eks-inputs)
The following inputs can be used as `step.with` keys
@@ -146,6 +148,19 @@ The following inputs can be used as `step.with` keys
+#### **VPC Inputs**
+| Name | Type | Description |
+|------------------|---------|------------------------------------|
+| `aws_vpc_create` | Boolean | Define if a VPC should be created |
+| `aws_vpc_name` | String | Define a name for the VPC. Defaults to `VPC for ${aws_resource_identifier}`. |
+| `aws_vpc_cidr_block` | String | Define the base CIDR block which is divided into subnet CIDR blocks. Defaults to `10.10.0.0/16`. |
+| `aws_vpc_public_subnets` | String | Comma separated list of public subnets. Defaults to `10.10.110.0/24`|
+| `aws_vpc_private_subnets` | String | Comma separated list of private subnets. If no input, no private subnet will be created. Defaults to ``. |
+| `aws_vpc_availability_zones` | String | Comma separated list of availability zones. Defaults to `aws_default_region+` value. If a list is defined, the first zone will be the one used for the EC2 instance. |
+| `aws_vpc_id` | String | AWS VPC ID. Accepts `vpc-###` values. |
+| `aws_vpc_subnet_id` | String | AWS VPC Subnet ID. If none provided, will pick one. (Ideal when there's only one) |
+
+
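As a minimal sketch of how these inputs are consumed, a caller workflow step might look like the following; the action ref, step name and values are assumed for illustration:

```yaml
      - name: Deploy with a dedicated VPC
        uses: bitovi/github-actions-commons@main   # action ref assumed for illustration
        with:
          aws_vpc_create: true
          aws_vpc_name: my-app-vpc
          aws_vpc_cidr_block: 10.10.0.0/16
          aws_vpc_public_subnets: 10.10.110.0/24,10.10.120.0/24
          aws_vpc_availability_zones: us-east-1a,us-east-1b
```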
#### **Certificate Inputs**
| Name | Type | Description |
@@ -179,14 +194,14 @@ The following inputs can be used as `step.with` keys
|------------------|---------|------------------------------------|
| `aws_efs_create` | Boolean | Toggle to indicate whether to create an EFS and mount it to the ec2 as a part of the provisioning. Note: The EFS will be managed by the stack and will be destroyed along with the stack |
| `aws_efs_create_ha` | Boolean | Toggle to indicate whether the EFS resource should be highly available (target mounts in all available zones within region) |
-| `aws_efs_mount_id` | String | ID of existing EFS. |
-| `aws_efs_mount_security_group_id` | String | ID of the primary security group used by the existing EFS. |
+| `aws_efs_fs_id` | String | ID of existing EFS. |
+| `aws_efs_vpc_id` | String | ID of the VPC for the EFS mount target. If aws_efs_create_ha is set to true, will create one mount target per subnet available in the VPC. If not, a single mount target is created in an automatically selected subnet. |
+| `aws_efs_subnet_ids` | String | ID (or IDs) of the subnets for the EFS mount targets. (Comma separated string.) |
| `aws_efs_security_group_name` | String | The name of the EFS security group. Defaults to `SG for ${aws_resource_identifier} - EFS`. |
| `aws_efs_create_replica` | Boolean | Toggle to indicate whether a read-only replica should be created for the EFS primary file system |
+| `aws_efs_replication_destination` | String | AWS Region to target for replication. |
| `aws_efs_enable_backup_policy` | Boolean | Toggle to indicate whether the EFS should have a backup policy |
-| `aws_efs_zone_mapping` | JSON | Zone Mapping in the form of `{\"\":{\"subnet_id\":\"subnet-abc123\", \"security_groups\":\[\"sg-abc123\"\]} }` |
| `aws_efs_transition_to_inactive` | String | Indicates how long it takes to transition files to the IA storage class. |
-| `aws_efs_replication_destination` | String | AWS Region to target for replication. |
| `aws_efs_mount_target` | String | Directory path in efs to mount directory to. Default is `/`. |
| `aws_efs_ec2_mount_point` | String | The aws_efs_ec2_mount_point input represents the folder path within the EC2 instance to the data directory. Default is `/user/ubuntu//data`. Additionally this value is loaded into the docker-compose `.env` file as `HOST_DIR`. |
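A minimal sketch of the renamed EFS inputs as `step.with` keys, with assumed IDs; pass either a VPC or explicit subnets for the mount targets:

```yaml
        with:
          aws_efs_create_ha: true
          aws_efs_fs_id: fs-0123456789abcdef0     # existing EFS, id assumed
          aws_efs_vpc_id: vpc-0123456789abcdef0   # one mount target per subnet in this VPC
          # or, instead of a VPC id, explicit subnets:
          # aws_efs_subnet_ids: subnet-0aaa1111,subnet-0bbb2222
```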
@@ -224,6 +239,35 @@ The following inputs can be used as `step.with` keys
| `docker_efs_mount_target` | String | Directory path within docker env to mount directory to. Default is `/data`|
+
+#### **EKS Inputs**
+| Name | Type | Description |
+|------------------|---------|------------------------------------|
+| `aws_eks_create` | Boolean | Define if an EKS cluster should be created |
+| `aws_eks_region` | String | Define the region where EKS cluster should be created. Defaults to `us-east-1`. |
+| `aws_eks_security_group_name_master` | String | Define the security group name for the EKS master. Defaults to `SG for ${GITHUB_ORG_NAME}-${GITHUB_REPO_NAME}-${GITHUB_BRANCH_NAME} - ${aws_eks_environment} - EKS Master`. |
+| `aws_eks_security_group_name_worker` | String | Define the security group name for the EKS workers. Defaults to `SG for ${GITHUB_ORG_NAME}-${GITHUB_REPO_NAME}-${GITHUB_BRANCH_NAME} - ${aws_eks_environment} - EKS Worker`. |
+| `aws_eks_environment` | String | Specify the eks environment name. Defaults to `env` |
+| `aws_eks_stackname` | String | Specify the eks stack name for your environment. Defaults to `eks-stack`. |
+| `aws_eks_cidr_block` | String | Define Base CIDR block which is divided into subnet CIDR blocks. Defaults to `10.0.0.0/16`. |
+| `aws_eks_workstation_cidr` | String | Comma separated list of remote public CIDR blocks to add to the worker nodes security groups. |
+| `aws_eks_availability_zones` | String | Comma separated list of availability zones. Defaults to `us-east-1a,us-east-1b`. |
+| `aws_eks_private_subnets` | String | Comma separated list of private subnets. Defaults to `10.0.1.0/24,10.0.2.0/24`. |
+| `aws_eks_public_subnets` | String | Comma separated list of public subnets. Defaults to `10.0.101.0/24,10.0.102.0/24`|
+| `aws_eks_cluster_name` | String | Specify the k8s cluster name. Defaults to `${GITHUB_ORG_NAME}-${GITHUB_REPO_NAME}-${GITHUB_BRANCH_NAME}-cluster` |
+| `aws_eks_cluster_log_types` | String | Comma separated list of cluster log type. See [this AWS doc](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). Defaults to `none`. |
+| `aws_eks_cluster_version` | String | Specify the k8s cluster version. Defaults to `1.27` |
+| `aws_eks_instance_type` | String | Define the EC2 instance type. See [this list](https://aws.amazon.com/ec2/instance-types/) for reference. Defaults to `t3a.medium`. |
+| `aws_eks_instance_ami_id` | String | AWS AMI ID. Will default to the latest Amazon EKS Node image for the cluster version. |
+| `aws_eks_instance_user_data_file` | String | Relative path in the repo for a user provided script to be executed with the EC2 Instance creation. See note. |
+| `aws_eks_ec2_key_pair` | String | Enter an existing ec2 key pair name for worker nodes. If none, will create one. |
+| `aws_eks_store_keypair_sm` | Boolean | If true, will store the newly created keys in Secret Manager. |
+| `aws_eks_desired_capacity` | String | Enter the desired capacity for the worker nodes. Defaults to `2`. |
+| `aws_eks_max_size` | String | Enter the max_size for the worker nodes. Defaults to `4`. |
+| `aws_eks_min_size` | String | Enter the min_size for the worker nodes. Defaults to `2`. |
+| `input_helm_charts` | String | Relative path to the folder in the project containing the Helm charts to be installed. They can be uncompressed or compressed (.tgz) files. |
+
+
## Note about resource identifiers
diff --git a/action.yaml b/action.yaml
index c8f8fab8..ad83c6cb 100644
--- a/action.yaml
+++ b/action.yaml
@@ -165,7 +165,33 @@ inputs:
aws_ec2_user_data_replace_on_change:
description: 'If user_data file changes, instance will stop and start. Hence public IP will change. Defaults to true.'
required: false
-
+
+ # AWS VPC Inputs
+ aws_vpc_create:
+ description: 'Define if a VPC should be created'
+ required: false
+ aws_vpc_name:
+ description: 'Set a specific name for the VPC'
+ required: false
+ aws_vpc_cidr_block:
+    description: 'Define the base CIDR block which is divided into subnet CIDR blocks. Defaults to 10.10.0.0/16.'
+ required: false
+ aws_vpc_public_subnets:
+ description: 'Comma separated list of public subnets. Defaults to 10.10.110.0/24'
+ required: false
+ aws_vpc_private_subnets:
+ description: 'Comma separated list of private subnets. If none, none will be created.'
+ required: false
+ aws_vpc_availability_zones:
+    description: 'Comma separated list of availability zones. Defaults to `aws_default_region`.'
+ required: false
+ aws_vpc_id:
+ description: 'AWS VPC ID. Accepts `vpc-###` values.'
+ required: false
+ aws_vpc_subnet_id:
+ description: 'Specify a Subnet to be used with the instance. If none provided, will pick one.'
+ required: false
+
# AWS Route53 Domains and Certificates
aws_r53_enable:
description: 'Enables the usage of Route53 to manage DNS records.'
@@ -221,30 +247,29 @@ inputs:
aws_efs_create_ha:
description: 'Toggle to indicate whether the EFS resource should be highly available (target mounts in all available zones within region)'
required: false
- aws_efs_mount_id:
+ aws_efs_fs_id:
description: 'ID of existing EFS'
required: false
- aws_efs_mount_security_group_id:
- description: 'ID of the primary security group used by the existing EFS'
+ aws_efs_vpc_id:
+ description: 'ID of the VPC for the EFS mount target. If aws_efs_create_ha is set to true, will create one mount target per subnet available in the VPC. If not, will pick one.'
required: false
+ aws_efs_subnet_ids:
+ description: 'ID or IDs of the subnet for the EFS mount target.'
aws_efs_security_group_name:
description: 'The name of the EFS security group'
required: false
aws_efs_create_replica:
description: 'Toggle to indicate whether a read-only replica should be created for the EFS primary file system'
required: false
+ aws_efs_replication_destination:
+ description: 'AWS Region to target for replication'
+ required: false
aws_efs_enable_backup_policy:
description: 'Toggle to indicate whether the EFS should have a backup policy, default is false'
required: false
- aws_efs_zone_mapping:
- description: 'Information on Zone Mapping can be found in the [README.md](README.md#efs-zone-mapping)'
- required: false
aws_efs_transition_to_inactive:
description: 'Indicates how long it takes to transition files to the IA storage class.'
required: false
- aws_efs_replication_destination:
- description: 'AWS Region to target for replication'
- required: false
aws_efs_mount_target:
description: 'Directory path in the EFS volume to mount directory to. Default is /.'
required: false
@@ -391,6 +416,9 @@ outputs:
vm_url:
description: "The URL of the generated app"
value: ${{ steps.deploy.outputs.vm_url }}
+ ec2_url:
+ description: "The URL of the generated ec2 instance"
+ value: ${{ steps.deploy.outputs.instance_public_dns }}
runs:
using: 'composite'
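A caller workflow reads the new output from the step that uses this action; the step id `commons` is assumed for illustration:

```yaml
      - name: Show EC2 URL
        run: echo "EC2 instance is at ${{ steps.commons.outputs.ec2_url }}"
```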
@@ -466,6 +494,16 @@ runs:
AWS_EC2_USER_DATA_FILE: ${{ inputs.aws_ec2_user_data_file }}
AWS_EC2_USER_DATA_REPLACE_ON_CHANGE: ${{ inputs.aws_ec2_user_data_replace_on_change }}
+ ## AWS VPC
+ AWS_VPC_CREATE: ${{ inputs.aws_vpc_create }}
+ AWS_VPC_NAME: ${{ inputs.aws_vpc_name }}
+ AWS_VPC_CIDR_BLOCK: ${{ inputs.aws_vpc_cidr_block }}
+ AWS_VPC_PUBLIC_SUBNETS: ${{ inputs.aws_vpc_public_subnets }}
+ AWS_VPC_PRIVATE_SUBNETS: ${{ inputs.aws_vpc_private_subnets }}
+ AWS_VPC_AVAILABILITY_ZONES: ${{ inputs.aws_vpc_availability_zones }}
+ AWS_VPC_ID: ${{ inputs.aws_vpc_id }}
+ AWS_VPC_SUBNET_ID: ${{ inputs.aws_vpc_subnet_id }}
+
# AWS Route53 Domains and Certificates
AWS_R53_ENABLE: ${{ inputs.aws_r53_enable }}
AWS_R53_DOMAIN_NAME: ${{ inputs.aws_r53_domain_name }}
@@ -488,14 +526,14 @@ runs:
# AWS EFS
AWS_EFS_CREATE: ${{ inputs.aws_efs_create }}
AWS_EFS_CREATE_HA: ${{ inputs.aws_efs_create_ha }}
- AWS_EFS_MOUNT_ID: ${{ inputs.aws_efs_mount_id }}
- AWS_EFS_MOUNT_SECURITY_GROUP_ID: ${{ inputs.aws_efs_mount_security_group_id }}
+ AWS_EFS_FS_ID: ${{ inputs.aws_efs_fs_id }}
+ AWS_EFS_VPC_ID: ${{ inputs.aws_efs_vpc_id }}
+ AWS_EFS_SUBNET_IDS: ${{ inputs.aws_efs_subnet_ids }}
AWS_EFS_SECURITY_GROUP_NAME: ${{ inputs.aws_efs_security_group_name }}
AWS_EFS_CREATE_REPLICA: ${{ inputs.aws_efs_create_replica }}
+ AWS_EFS_REPLICATION_DESTINATION: ${{ inputs.aws_efs_replication_destination }}
AWS_EFS_ENABLE_BACKUP_POLICY: ${{ inputs.aws_efs_enable_backup_policy }}
- AWS_EFS_ZONE_MAPPING: ${{ inputs.aws_efs_zone_mapping }}
AWS_EFS_TRANSITION_TO_INACTIVE: ${{ inputs.aws_efs_transition_to_inactive }}
- AWS_EFS_REPLICATION_DESTINATION: ${{ inputs.aws_efs_replication_destination }}
AWS_EFS_MOUNT_TARGET: ${{ inputs.aws_efs_mount_target }}
AWS_EFS_EC2_MOUNT_POINT: ${{ inputs.aws_efs_ec2_mount_point }}
@@ -562,10 +600,13 @@ runs:
env:
SUCCESS: ${{ job.status }} # success, failure, cancelled
URL_OUTPUT: ${{ steps.deploy.outputs.vm_url }}
+ EC2_URL_OUTPUT: ${{ steps.deploy.outputs.ec2_url }}
BITOPS_CODE_ONLY: ${{ inputs.bitops_code_only }}
BITOPS_CODE_STORE: ${{ inputs.bitops_code_store }}
TF_STACK_DESTROY: ${{ inputs.tf_stack_destroy }}
TF_STATE_BUCKET_DESTROY: ${{ inputs.tf_state_bucket_destroy }}
+ AWS_EC2_PORT_LIST: ${{ inputs.aws_ec2_port_list }}
+ AWS_ELB_LISTEN_PORT: ${{ inputs.aws_elb_listen_port }}
run: $GITHUB_ACTION_PATH/operations/_scripts/deploy/summary.sh
# upload generated artifacts to GitHub if enabled
diff --git a/operations/_scripts/deploy/summary.sh b/operations/_scripts/deploy/summary.sh
index f51c2be1..4ee9b1c4 100755
--- a/operations/_scripts/deploy/summary.sh
+++ b/operations/_scripts/deploy/summary.sh
@@ -4,10 +4,13 @@
### coming into this we have env vars:
# SUCCESS=${{ job.status }} # success, cancelled, failure
# URL_OUTPUT=${{ steps.deploy.outputs.vm_url }}
+# EC2_URL_OUTPUT=${{ steps.deploy.outputs.ec2_url }}
# BITOPS_CODE_ONLY
# BITOPS_CODE_STORE
# TF_STACK_DESTROY
# TF_STATE_BUCKET_DESTROY
+# AWS_EC2_PORT_LIST
+# AWS_ELB_LISTEN_PORT
# Create an error code mechanism so we don't have to check the actual static text,
# just which case we fell into
@@ -24,13 +27,42 @@
# 9 - success, destroy infrastructure
# 10 - cancelled
+# Function to build a newline-separated list of url:port lines from a comma-separated port list
+function process_and_return() {
+ local url="$1"
+ local ports="$2"
+ IFS=',' read -ra port_array <<< "$ports"
+ result=""
+ for p in "${port_array[@]}"; do
+ result+="$url:$p\n"
+ done
+ echo -e "$result"
+}
+
+# Function to append each line of a given variable to the GitHub step summary
+echo_lines() {
+ local input="$1"
+ while IFS= read -r line; do
+ echo -e "$line" >> $GITHUB_STEP_SUMMARY
+ done <<< "$input"
+}
+
+# Process and store URL_OUTPUT:AWS_ELB_LISTEN_PORT in a variable
+output_elb=$(process_and_return "$URL_OUTPUT" "$AWS_ELB_LISTEN_PORT")
+# If no port was specified for the ELB, pass the URL through unchanged
+if [[ -z "$output_elb" ]]; then
+ output_elb="$URL_OUTPUT"
+fi
+final_output+="${output_elb}\n"
+# Process and store EC2_URL_OUTPUT:AWS_EC2_PORT_LIST in a variable
+output_ec2=$(process_and_return "$EC2_URL_OUTPUT" "$AWS_EC2_PORT_LIST")
+final_output+="${output_ec2}\n"
+
SUMMARY_CODE=0
if [[ $SUCCESS == 'success' ]]; then
if [[ $URL_OUTPUT != '' ]]; then
- result_string="## Deploy Complete! :rocket:
- $URL_OUTPUT"
-
+ result_string="## Deploy Complete! :rocket:"
elif [[ $BITOPS_CODE_ONLY == 'true' ]]; then
if [[ $BITOPS_CODE_STORE == 'true' ]]; then
SUMMARY_CODE=6
@@ -72,5 +104,11 @@ else
If you consider this is a bug in the Github Action, please submit an issue to our repo."
fi
-echo "$result_string" >> $GITHUB_STEP_SUMMARY
-echo "SUMMARY_CODE=$SUMMARY_CODE" >> $GITHUB_OUTPUT
+echo -e "$result_string" >> $GITHUB_STEP_SUMMARY
+if [[ $SUCCESS == 'success' ]]; then
+ if [[ $URL_OUTPUT != '' ]]; then
+ while IFS= read -r line; do
+ echo -e "$line" >> $GITHUB_STEP_SUMMARY
+ done <<< "$final_output"
+ fi
+fi
diff --git a/operations/_scripts/generate/generate_bitops_config.sh b/operations/_scripts/generate/generate_bitops_config.sh
index 0f299738..3f5eb489 100755
--- a/operations/_scripts/generate/generate_bitops_config.sh
+++ b/operations/_scripts/generate/generate_bitops_config.sh
@@ -77,7 +77,12 @@ if [ -n "$TF_TARGETS" ]; then
fi
# random_integer.az_select needs to be created before the "full stack" to avoid potential state dependency locks
targets="$targets
- - random_integer.az_select"
+ - module.vpc.random_integer.az_select"
+# When VPC creation is enabled, the VPC is a prerequisite for the whole stack, so it is targeted for creation first.
+if [[ $(alpha_only "$AWS_VPC_CREATE") == true ]]; then
+targets="$targets
+ - module.vpc"
+fi
targets_attribute="$targets_attribute $targets"
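With `AWS_VPC_CREATE` set to true, the list fragment appended to the BitOps config targets therefore carries both entries (items as emitted by the script above; surrounding keys omitted):

```yaml
  - module.vpc.random_integer.az_select
  - module.vpc
```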
#Will add the user_data file into the EC2 Terraform folder
diff --git a/operations/_scripts/generate/generate_vars_terraform.sh b/operations/_scripts/generate/generate_vars_terraform.sh
index 16f7d381..0832646c 100644
--- a/operations/_scripts/generate/generate_vars_terraform.sh
+++ b/operations/_scripts/generate/generate_vars_terraform.sh
@@ -130,6 +130,18 @@ if [[ $(alpha_only "$AWS_EC2_INSTANCE_CREATE") == true ]]; then
aws_ec2_user_data_replace_on_change=$(generate_var aws_ec2_user_data_replace_on_change $AWS_EC2_USER_DATA_REPLACE_ON_CHANGE)
fi
+#-- VPC Handling --#
+if [[ $(alpha_only "$AWS_VPC_CREATE") == true ]]; then
+ aws_vpc_create=$(generate_var aws_vpc_create $AWS_VPC_CREATE)
+ aws_vpc_name=$(generate_var aws_vpc_name $AWS_VPC_NAME)
+ aws_vpc_cidr_block=$(generate_var aws_vpc_cidr_block $AWS_VPC_CIDR_BLOCK)
+ aws_vpc_public_subnets=$(generate_var aws_vpc_public_subnets $AWS_VPC_PUBLIC_SUBNETS)
+ aws_vpc_private_subnets=$(generate_var aws_vpc_private_subnets $AWS_VPC_PRIVATE_SUBNETS)
+ aws_vpc_availability_zones=$(generate_var aws_vpc_availability_zones $AWS_VPC_AVAILABILITY_ZONES)
+fi
+aws_vpc_id=$(generate_var aws_vpc_id $AWS_VPC_ID)
+aws_vpc_subnet_id=$(generate_var aws_vpc_subnet_id $AWS_VPC_SUBNET_ID)
+
#-- AWS Route53 and certs --#
if [[ $(alpha_only "$AWS_R53_ENABLE") == true ]]; then
aws_r53_enable=$(generate_var aws_r53_enable $AWS_R53_ENABLE)
@@ -161,14 +173,14 @@ if [[ $(alpha_only "$AWS_EFS_ENABLE") == true ]]; then
aws_efs_enable=$(generate_var aws_efs_enable $AWS_EFS_ENABLE)
aws_efs_create=$(generate_var aws_efs_create $AWS_EFS_CREATE)
aws_efs_create_ha=$(generate_var aws_efs_create_ha $AWS_EFS_CREATE_HA)
- aws_efs_mount_id=$(generate_var aws_efs_mount_id $AWS_EFS_MOUNT_ID)
- aws_efs_mount_security_group_id=$(generate_var aws_efs_mount_security_group_id $AWS_EFS_MOUNT_SECURITY_GROUP_ID)
+ aws_efs_fs_id=$(generate_var aws_efs_fs_id $AWS_EFS_FS_ID)
+ aws_efs_vpc_id=$(generate_var aws_efs_vpc_id $AWS_EFS_VPC_ID)
+ aws_efs_subnet_ids=$(generate_var aws_efs_subnet_ids $AWS_EFS_SUBNET_IDS)
aws_efs_security_group_name=$(generate_var aws_efs_security_group_name $AWS_EFS_SECURITY_GROUP_NAME)
aws_efs_create_replica=$(generate_var aws_efs_create_replica $AWS_EFS_CREATE_REPLICA)
+ aws_efs_replication_destination=$(generate_var aws_efs_replication_destination $AWS_EFS_REPLICATION_DESTINATION)
aws_efs_enable_backup_policy=$(generate_var aws_efs_enable_backup_policy $AWS_EFS_ENABLE_BACKUP_POLICY)
- aws_efs_zone_mapping=$(generate_var aws_efs_zone_mapping $AWS_EFS_ZONE_MAPPING)
aws_efs_transition_to_inactive=$(generate_var aws_efs_transition_to_inactive $AWS_EFS_TRANSITION_TO_INACTIVE)
- aws_efs_replication_destination=$(generate_var aws_efs_replication_destination $AWS_EFS_REPLICATION_DESTINATION)
aws_efs_mount_target=$(generate_var aws_efs_mount_target $AWS_EFS_MOUNT_TARGET)
aws_efs_ec2_mount_point=$(generate_var aws_efs_ec2_mount_point $AWS_EFS_EC2_MOUNT_POINT)
fi
@@ -265,6 +277,16 @@ $aws_ec2_create_keypair_sm
$aws_ec2_instance_public_ip
$aws_ec2_user_data_replace_on_change
+#-- VPC --#
+$aws_vpc_create
+$aws_vpc_name
+$aws_vpc_cidr_block
+$aws_vpc_public_subnets
+$aws_vpc_private_subnets
+$aws_vpc_availability_zones
+$aws_vpc_id
+$aws_vpc_subnet_id
+
#-- R53 --#
$aws_r53_enable
$aws_r53_domain_name
@@ -288,14 +310,14 @@ $lb_access_bucket_name
$aws_efs_enable
$aws_efs_create
$aws_efs_create_ha
-$aws_efs_mount_id
-$aws_efs_mount_security_group_id
+$aws_efs_fs_id
+$aws_efs_vpc_id
+$aws_efs_subnet_ids
$aws_efs_security_group_name
$aws_efs_create_replica
+$aws_efs_replication_destination
$aws_efs_enable_backup_policy
-$aws_efs_zone_mapping
$aws_efs_transition_to_inactive
-$aws_efs_replication_destination
$aws_efs_mount_target
$aws_efs_ec2_mount_point
diff --git a/operations/deployment/terraform/aws/aws_default_tags.tf b/operations/deployment/terraform/aws/aws_default_tags.tf
deleted file mode 100644
index 9d8cf385..00000000
--- a/operations/deployment/terraform/aws/aws_default_tags.tf
+++ /dev/null
@@ -1,12 +0,0 @@
-locals {
- aws_tags = {
- OperationsRepo = "bitovi/github-actions-commons/operations/${var.ops_repo_environment}"
- AWSResourceIdentifier = "${var.aws_resource_identifier}"
- GitHubOrgName = "${var.app_org_name}"
- GitHubRepoName = "${var.app_repo_name}"
- GitHubBranchName = "${var.app_branch_name}"
- GitHubAction = "bitovi/github-actions-commons"
- OperationsRepoEnvironment = "${var.ops_repo_environment}"
- Created_with = "Bitovi-BitOps"
- }
-}
\ No newline at end of file
diff --git a/operations/deployment/terraform/aws/aws_default_vpc_subnet_sg.tf b/operations/deployment/terraform/aws/aws_default_vpc_subnet_sg.tf
deleted file mode 100644
index f26a09d8..00000000
--- a/operations/deployment/terraform/aws/aws_default_vpc_subnet_sg.tf
+++ /dev/null
@@ -1,46 +0,0 @@
-# This file contains the generation of availability zones, subnets and security groups.
-# Requires:
-# - aws_ec2
-
-data "aws_vpc" "default" {
- default = true
-}
-
-data "aws_subnets" "vpc_subnets" {
- filter {
- name = "vpc-id"
-
- # todo: support a specified vpc id
- # values = [var.vpc_id ? var.vpc_id : data.aws_vpc.default.id]
- values = [data.aws_vpc.default.id]
- }
-}
-
-data "aws_region" "current" {}
-
-
-data "aws_security_group" "default" {
- filter {
- name = "group-name"
- values = ["default"]
- }
- filter {
- name = "vpc-id"
- values = [data.aws_vpc.default.id]
- }
-}
-
-output "aws_default_subnet_ids" {
- description = "The subnet ids from the default vpc"
- value = data.aws_subnets.vpc_subnets.ids
-}
-
-output "aws_region_current_name" {
- description = "The AWS Current region name"
- value = data.aws_region.current.name
-}
-
-output "aws_security_group_default_id" {
- description = "The AWS Default SG Id"
- value = data.aws_security_group.default.id
-}
\ No newline at end of file
diff --git a/operations/deployment/terraform/aws/aws_dotenv_secretmanager.tf b/operations/deployment/terraform/aws/aws_dotenv_secretmanager.tf
deleted file mode 100644
index fddae880..00000000
--- a/operations/deployment/terraform/aws/aws_dotenv_secretmanager.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file will create a key=value file with an AWS Secret stored in AWS Secret Manager
-# With a JSON style of "{"key1":"value1","key2":"value2"}"
-
-locals {
- secret_provided = (var.env_aws_secret != null ? true : false)
-}
-
-locals {
- s3_secret_raw = local.secret_provided ? nonsensitive(jsondecode(data.aws_secretsmanager_secret_version.env_secret[0].secret_string)) : {}
- s3_secret_string = local.secret_provided ? join("\n", [for k, v in local.s3_secret_raw : "${k}=\"${v}\""]) : ""
-}
-
-data "aws_secretsmanager_secret_version" "env_secret" {
- count = local.secret_provided ? 1 : 0
- secret_id = var.env_aws_secret
-}
-
-resource "local_file" "tf-secretdotenv" {
- count = local.secret_provided ? 1 : 0
- filename = format("%s/%s", abspath(path.root), "aws.env")
- content = local.secret_provided ? "${local.s3_secret_string}\n" : ""
-}
\ No newline at end of file
diff --git a/operations/deployment/terraform/aws/aws_variables.tf b/operations/deployment/terraform/aws/aws_variables.tf
index 5eb966f2..ba4196a9 100644
--- a/operations/deployment/terraform/aws/aws_variables.tf
+++ b/operations/deployment/terraform/aws/aws_variables.tf
@@ -21,7 +21,7 @@ variable "aws_additional_tags" {
variable "env_aws_secret" {
type = string
description = "Secret name to pull env variables from AWS Secret Manager"
- default = null
+ default = ""
}
# EC2 Instance
@@ -109,6 +109,55 @@ variable "aws_ec2_user_data_replace_on_change" {
description = "Forces destruction of EC2 instance"
}
+## AWS VPC
+variable "aws_vpc_create" {
+ type = bool
+ description = "Toggle VPC creation"
+ default = false
+}
+
+variable "aws_vpc_name" {
+ type = string
+ description = "Name for the aws vpc"
+ default = ""
+}
+
+variable "aws_vpc_id" {
+ type = string
+ description = "aws vpc id"
+ default = ""
+}
+
+variable "aws_vpc_subnet_id" {
+ type = string
+ description = "aws vpc subnet id"
+ default = ""
+}
+
+variable "aws_vpc_cidr_block" {
+ description = "CIDR of the VPC"
+ type = string
+ default = "10.10.0.0/16"
+}
+
+variable "aws_vpc_public_subnets" {
+ type = string
+ default = "10.10.110.0/24"
+ description = "A list of public subnets"
+}
+
+variable "aws_vpc_private_subnets" {
+ type = string
+ default = ""
+ description = "A list of private subnets"
+}
+
+variable "aws_vpc_availability_zones" {
+ type = string
+ default = ""
+ description = "A list of availability zones."
+}
+
# AWS Route53 Domains and Certificates
variable "aws_r53_enable" {
type = bool
@@ -169,7 +218,7 @@ variable "aws_elb_security_group_name" {
variable "aws_elb_app_port" {
type = string
- default = "3000"
+ default = ""
description = "app port"
}
@@ -218,15 +267,21 @@ variable "aws_efs_create_ha" {
default = false
}
-variable "aws_efs_mount_id" {
+variable "aws_efs_fs_id" {
type = string
description = "ID of existing EFS"
default = null
}
-variable "aws_efs_mount_security_group_id" {
+variable "aws_efs_vpc_id" {
type = string
- description = "ID of the primary security group used by the existing EFS"
+ description = "ID of the VPC for the EFS mount target. If aws_efs_create_ha is set to true, will create one mount target per subnet available in the VPC."
+ default = null
+}
+
+variable "aws_efs_subnet_ids" {
+ type = string
+ description = "ID of the VPC for the EFS mount target. If aws_efs_create_ha is set to true, will create one mount target per subnet available in the VPC."
default = null
}
@@ -242,34 +297,24 @@ variable "aws_efs_create_replica" {
default = false
}
+variable "aws_efs_replication_destination" {
+ type = string
+ default = ""
+ description = "AWS Region to target for replication"
+}
+
variable "aws_efs_enable_backup_policy" {
type = bool
default = false
description = "Toggle to indiciate whether the EFS should have a backup policy, default is `false`"
}
-variable "aws_efs_zone_mapping" {
- type = map(object({
- subnet_id = string
- security_groups = list(string)
- }))
- description = "Zone Mapping in the form of {\"\":{\"subnet_id\":\"subnet-abc123\", \"security_groups\":[\"sg-abc123\"]} }"
- nullable = true
- default = null
-}
-
variable "aws_efs_transition_to_inactive" {
type = string
default = "AFTER_30_DAYS"
description = "https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_file_system#transition_to_ia"
}
-variable "aws_efs_replication_destination" {
- type = string
- default = null
- description = "AWS Region to target for replication"
-}
-
variable "aws_efs_mount_target" {
type = string
description = "Directory path in efs to mount to"
diff --git a/operations/deployment/terraform/aws/bitops.after-deploy.d/generate-outputs.sh b/operations/deployment/terraform/aws/bitops.after-deploy.d/generate-outputs.sh
index 88c61056..4f9097ea 100755
--- a/operations/deployment/terraform/aws/bitops.after-deploy.d/generate-outputs.sh
+++ b/operations/deployment/terraform/aws/bitops.after-deploy.d/generate-outputs.sh
@@ -34,10 +34,6 @@ AWS_INSTANCE_VM_URL="$BITOPS_EC2_VM_URL"
" > $BITOPS_ENVROOT/terraform/aws/ec2.env
fi
- if [ -n "$BITOPS_EC2_PUBLIC_IP" ] && [ "$BITOPS_ANSIBLE_SKIP_DEPLOY" != "true" ]; then
- sed -i "s/BITOPS_EC2_PUBLIC_IP/$(echo $BITOPS_EC2_PUBLIC_IP)/" ${BITOPS_ENVROOT}/terraform/aws/inventory.yaml
- cat ${BITOPS_ENVROOT}/terraform/aws/inventory.yaml
- fi
fi
echo "end terraform output for bo-out"
\ No newline at end of file
diff --git a/operations/deployment/terraform/aws/bitovi_main.tf b/operations/deployment/terraform/aws/bitovi_main.tf
index b1ed8311..9e32ecae 100644
--- a/operations/deployment/terraform/aws/bitovi_main.tf
+++ b/operations/deployment/terraform/aws/bitovi_main.tf
@@ -15,13 +15,14 @@ module "ec2" {
aws_ec2_security_group_name = var.aws_ec2_security_group_name
aws_ec2_port_list = var.aws_ec2_port_list
# Data inputs
- aws_ec2_selected_vpc_id = data.aws_vpc.default.id
- aws_subnet_selected_id = data.aws_subnet.selected[0].id
- preferred_az = local.preferred_az
+ aws_ec2_selected_vpc_id = module.vpc.aws_selected_vpc_id
+ aws_subnet_selected_id = module.vpc.aws_vpc_subnet_selected
+ preferred_az = module.vpc.preferred_az
# Others
aws_resource_identifier = var.aws_resource_identifier
aws_resource_identifier_supershort = var.aws_resource_identifier_supershort
common_tags = local.default_tags
+ depends_on = [module.vpc]
}
module "aws_certificates" {
@@ -61,7 +62,7 @@ module "aws_route53" {
module "aws_elb" {
source = "../modules/aws/elb"
count = var.aws_ec2_instance_create ? 1 : 0
- # We should have a count here, right?
+ # ELB Values
aws_elb_security_group_name = var.aws_elb_security_group_name
aws_elb_app_port = var.aws_elb_app_port
aws_elb_app_protocol = var.aws_elb_app_protocol
@@ -70,7 +71,9 @@ module "aws_elb" {
aws_elb_healthcheck = var.aws_elb_healthcheck
lb_access_bucket_name = var.lb_access_bucket_name
# EC2
- aws_instance_server_az = [local.preferred_az]
+ aws_instance_server_az = [module.vpc.preferred_az]
+ aws_vpc_selected_id = module.vpc.aws_selected_vpc_id
+ aws_vpc_subnet_selected = module.vpc.aws_vpc_subnet_selected
aws_instance_server_id = module.ec2[0].aws_instance_server_id
aws_elb_target_sg_id = module.ec2[0].aws_security_group_ec2_sg_id
# Certs
@@ -79,56 +82,34 @@ module "aws_elb" {
aws_resource_identifier = var.aws_resource_identifier
aws_resource_identifier_supershort = var.aws_resource_identifier_supershort
common_tags = local.default_tags
+ depends_on = [module.vpc,module.ec2]
}
module "efs" {
source = "../modules/aws/efs"
- count = local.create_efs ? 1 : 0
-# EFS
- aws_efs_replication_destination = var.aws_efs_replication_destination
- aws_efs_transition_to_inactive = var.aws_efs_transition_to_inactive
+ count = var.aws_efs_enable ? 1 : 0
+ # EFS
+ aws_efs_create = var.aws_efs_create
+ aws_efs_create_ha = var.aws_efs_create_ha
+ aws_efs_fs_id = var.aws_efs_fs_id
+ aws_efs_vpc_id = var.aws_efs_vpc_id
+ aws_efs_subnet_ids = var.aws_efs_subnet_ids
aws_efs_security_group_name = var.aws_efs_security_group_name
- aws_efs_enable_backup_policy = var.aws_efs_enable_backup_policy
aws_efs_create_replica = var.aws_efs_create_replica
- # EC2
- aws_ec2_instance_create = var.aws_ec2_instance_create
+ aws_efs_replication_destination = var.aws_efs_replication_destination
+ aws_efs_enable_backup_policy = var.aws_efs_enable_backup_policy
+ aws_efs_transition_to_inactive = var.aws_efs_transition_to_inactive
# VPC inputs
- aws_vpc_id = data.aws_vpc.default.id
- aws_vpc_cidr_block_whitelist = data.aws_vpc.default.cidr_block
- aws_region_current_name = data.aws_region.current.name
+ aws_selected_vpc_id = module.vpc.aws_selected_vpc_id
+ aws_selected_subnet_id = module.vpc.aws_vpc_subnet_selected
+ aws_selected_az = module.vpc.preferred_az
+ aws_selected_az_list = module.vpc.availability_zones
# Others
aws_resource_identifier = var.aws_resource_identifier
common_tags = local.default_tags
+ depends_on = [module.vpc]
}
-module "ec2_efs" {
- source = "../modules/aws/ec2_efs"
- count = var.aws_ec2_instance_create && local.create_efs ? var.aws_efs_mount_id != "" ? 1 : 0 : 0
- # EFS
- aws_efs_create = var.aws_efs_create
- aws_efs_create_ha = var.aws_efs_create_ha
- aws_efs_mount_id = var.aws_efs_mount_id
- aws_efs_zone_mapping = var.aws_efs_zone_mapping
- aws_efs_ec2_mount_point = var.aws_efs_ec2_mount_point
- # Other
- ha_zone_mapping = local.ha_zone_mapping
- ec2_zone_mapping = local.ec2_zone_mapping
- # Docker
- docker_efs_mount_target = var.docker_efs_mount_target
- # Data inputs
- aws_region_current_name = data.aws_region.current.name #
- aws_security_group_efs_id = module.efs[0].aws_security_group_efs_id
- aws_efs_fs_id = module.efs[0].aws_efs_fs_id
- # Others
- common_tags = local.default_tags
- # Not exposed
- app_install_root = var.app_install_root
- app_repo_name = var.app_repo_name
- # Dependencies
- depends_on = [module.efs]
-}
-
-
module "aurora_rds" {
source = "../modules/aws/aurora"
count = var.aws_aurora_enable ? 1 : 0
@@ -148,15 +129,40 @@ module "aurora_rds" {
aws_aurora_database_protection = var.aws_aurora_database_protection
aws_aurora_database_final_snapshot = var.aws_aurora_database_final_snapshot
# Data inputs
- aws_vpc_default_id = data.aws_vpc.default.id
- aws_subnets_vpc_subnets_ids = data.aws_subnets.vpc_subnets.ids
- aws_region_current_name = data.aws_region.current.name
+ aws_allowed_sg_id = module.ec2[0].aws_security_group_ec2_sg_id
+ aws_selected_vpc_id = module.vpc.aws_selected_vpc_id
+ aws_subnets_vpc_subnets_ids = module.vpc.aws_selected_vpc_subnets
+ aws_region_current_name = module.vpc.aws_region_current_name
# Others
aws_resource_identifier = var.aws_resource_identifier
aws_resource_identifier_supershort = var.aws_resource_identifier_supershort
common_tags = local.default_tags
# Dependencies
- depends_on = [data.aws_subnets.vpc_subnets]
+ depends_on = [module.vpc]
+}
+
+module "vpc" {
+ source = "../modules/aws/vpc"
+ aws_vpc_create = var.aws_vpc_create
+ aws_vpc_id = var.aws_vpc_id
+ aws_vpc_subnet_id = var.aws_vpc_subnet_id
+ aws_vpc_cidr_block = var.aws_vpc_cidr_block
+ aws_vpc_name = var.aws_vpc_name
+ aws_vpc_public_subnets = var.aws_vpc_public_subnets
+ aws_vpc_private_subnets = var.aws_vpc_private_subnets
+ aws_vpc_availability_zones = var.aws_vpc_availability_zones
+ # Data inputs
+ aws_ec2_instance_type = var.aws_ec2_instance_type
+ aws_ec2_security_group_name = var.aws_ec2_security_group_name
+ # Others
+ aws_resource_identifier = var.aws_resource_identifier
+ common_tags = local.default_tags
+}
+
+module "secretmanager_get" {
+ source = "../modules/aws/secretmanager_get"
+ count = var.env_aws_secret != "" ? 1 : 0
+ env_aws_secret = var.env_aws_secret
}
#module "eks" {
@@ -195,6 +201,7 @@ module "aurora_rds" {
module "ansible" {
source = "../modules/aws/ansible"
count = var.aws_ec2_instance_create ? 1 : 0
+ aws_ec2_instance_ip = try(module.ec2[0].instance_public_ip,"")
aws_efs_enable = var.aws_efs_enable
app_repo_name = var.app_repo_name
app_install_root = var.app_install_root
@@ -203,7 +210,7 @@ module "ansible" {
aws_efs_ec2_mount_point = var.aws_efs_ec2_mount_point
aws_efs_mount_target = var.aws_efs_mount_target
docker_efs_mount_target = var.docker_efs_mount_target
- aws_ec2_efs_url = try(module.ec2_efs[0].efs_url,"")
+ aws_efs_fs_id = var.aws_efs_enable ? local.create_efs ? module.efs[0].aws_efs_fs_id : var.aws_efs_fs_id : null
# Data inputs
private_key_filename = module.ec2[0].private_key_filename
# Dependencies
@@ -211,6 +218,16 @@ module "ansible" {
}
locals {
+ aws_tags = {
+ OperationsRepo = "bitovi/github-actions-commons/operations/${var.ops_repo_environment}"
+ AWSResourceIdentifier = "${var.aws_resource_identifier}"
+ GitHubOrgName = "${var.app_org_name}"
+ GitHubRepoName = "${var.app_repo_name}"
+ GitHubBranchName = "${var.app_branch_name}"
+ GitHubAction = "bitovi/github-actions-commons"
+ OperationsRepoEnvironment = "${var.ops_repo_environment}"
+ Created_with = "Bitovi-BitOps"
+ }
default_tags = merge(local.aws_tags, var.aws_additional_tags)
fqdn_provided = (
(var.aws_r53_domain_name != "") ?
diff --git a/operations/deployment/terraform/modules/aws/ansible/aws_ansible_inventory.tf b/operations/deployment/terraform/modules/aws/ansible/aws_ansible_inventory.tf
index 18578eae..27c5079c 100644
--- a/operations/deployment/terraform/modules/aws/ansible/aws_ansible_inventory.tf
+++ b/operations/deployment/terraform/modules/aws/ansible/aws_ansible_inventory.tf
@@ -3,7 +3,7 @@ resource "local_file" "ansible_inventor_no_efs" {
filename = format("%s/%s", abspath(path.root), "inventory.yaml")
content = <<-EOT
bitops_servers:
- hosts: BITOPS_EC2_PUBLIC_IP
+ hosts: ${var.aws_ec2_instance_ip}
vars:
ansible_ssh_user: ubuntu
ansible_ssh_private_key_file: ${var.private_key_filename}
@@ -14,12 +14,17 @@ bitops_servers:
EOT
}
+data "aws_efs_file_system" "mount_efs" {
+ count = var.aws_efs_enable ? 1 : 0
+ file_system_id = var.aws_efs_fs_id
+}
+
resource "local_file" "ansible_inventory_efs" {
count = var.aws_efs_enable ? 1 : 0
filename = format("%s/%s", abspath(path.root), "inventory.yaml")
content = <<-EOT
bitops_servers:
- hosts: BITOPS_EC2_PUBLIC_IP
+ hosts: ${var.aws_ec2_instance_ip}
vars:
ansible_ssh_user: ubuntu
ansible_ssh_private_key_file: ${var.private_key_filename}
@@ -28,9 +33,19 @@ bitops_servers:
resource_identifier: ${var.aws_resource_identifier}
docker_remove_orphans: ${var.docker_remove_orphans}
mount_efs: true
- efs_url: ${var.aws_ec2_efs_url}
+ efs_url: ${data.aws_efs_file_system.mount_efs[0].dns_name}
aws_efs_ec2_mount_point: ${var.aws_efs_ec2_mount_point}
aws_efs_mount_target: ${var.aws_efs_mount_target != null ? var.aws_efs_mount_target : ""}
docker_efs_mount_target: ${var.docker_efs_mount_target}
EOT
+}
+
+resource "local_file" "efs-dotenv" {
+ count = var.aws_efs_enable ? 1 : 0
+ filename = format("%s/%s", abspath(path.root), "efs.env")
+ content = <<-EOT
+#### EFS
+HOST_DIR="${var.app_install_root}/${var.app_repo_name}/${var.aws_efs_ec2_mount_point}"
+TARGET_DIR="${var.docker_efs_mount_target}"
+EOT
}
\ No newline at end of file
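For reference, a rendered `inventory.yaml` under these changes would look roughly like the following sketch; host IP, key filename, identifiers and the EFS DNS name are assumed placeholders:

```yaml
bitops_servers:
  hosts: 203.0.113.10
  vars:
    ansible_ssh_user: ubuntu
    ansible_ssh_private_key_file: my-key.pem
    resource_identifier: my-org-my-repo-main
    mount_efs: true
    efs_url: fs-0123456789abcdef0.efs.us-east-1.amazonaws.com
    aws_efs_ec2_mount_point: /user/ubuntu/data
    docker_efs_mount_target: /data
```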
diff --git a/operations/deployment/terraform/modules/aws/ansible/aws_ansible_inventory_vars.tf b/operations/deployment/terraform/modules/aws/ansible/aws_ansible_inventory_vars.tf
index fb022d53..e589101d 100644
--- a/operations/deployment/terraform/modules/aws/ansible/aws_ansible_inventory_vars.tf
+++ b/operations/deployment/terraform/modules/aws/ansible/aws_ansible_inventory_vars.tf
@@ -1,3 +1,4 @@
+variable "aws_ec2_instance_ip" {}
variable "aws_efs_enable" {}
variable "app_repo_name" {}
variable "app_install_root" {}
@@ -6,5 +7,5 @@ variable "docker_remove_orphans" {}
variable "aws_efs_ec2_mount_point" {}
variable "aws_efs_mount_target" {}
variable "docker_efs_mount_target" {}
-variable "aws_ec2_efs_url" {}
+variable "aws_efs_fs_id" {}
variable "private_key_filename" {}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/aurora/aws_aurora.tf b/operations/deployment/terraform/modules/aws/aurora/aws_aurora.tf
index 302d947d..31c3206f 100644
--- a/operations/deployment/terraform/modules/aws/aurora/aws_aurora.tf
+++ b/operations/deployment/terraform/modules/aws/aurora/aws_aurora.tf
@@ -1,6 +1,7 @@
resource "aws_security_group" "aurora_security_group" {
name = var.aws_aurora_security_group_name != "" ? var.aws_aurora_security_group_name : "SG for ${var.aws_resource_identifier} - Aurora"
description = "SG for ${var.aws_resource_identifier} - Aurora"
+ vpc_id = var.aws_selected_vpc_id
egress {
from_port = 0
to_port = 0
@@ -36,10 +37,11 @@ module "aurora_cluster" {
}
}
- # Todo: handle vpc/networking explicitly
- # vpc_id = var.vpc_id
- # allowed_cidr_blocks = [var.vpc_cidr]
- subnets = var.aws_aurora_subnets == null || length(var.aws_aurora_subnets) == 0 ? var.aws_subnets_vpc_subnets_ids : var.aws_aurora_subnets
+ vpc_id = var.aws_selected_vpc_id
+ subnets = var.aws_aurora_subnets == null || length(var.aws_aurora_subnets) == 0 ? var.aws_subnets_vpc_subnets_ids : var.aws_aurora_subnets
+
+ allowed_security_groups = [var.aws_allowed_sg_id]
+ allowed_cidr_blocks = [data.aws_vpc.selected[0].cidr_block]
database_name = var.aws_aurora_database_name
port = var.aws_aurora_database_port
@@ -143,4 +145,9 @@ resource "aws_db_cluster_snapshot" "overwrite_db_snapshot" {
lifecycle {
create_before_destroy = true
}
+}
+
+data "aws_vpc" "selected" {
+ count = var.aws_selected_vpc_id != null ? 1 : 0
+ id = var.aws_selected_vpc_id
}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/aurora/aws_aurora_vars.tf b/operations/deployment/terraform/modules/aws/aurora/aws_aurora_vars.tf
index 7bf76b78..0004f2d8 100644
--- a/operations/deployment/terraform/modules/aws/aurora/aws_aurora_vars.tf
+++ b/operations/deployment/terraform/modules/aws/aurora/aws_aurora_vars.tf
@@ -16,10 +16,10 @@ variable "aws_aurora_database_final_snapshot" {}
variable "aws_subnets_vpc_subnets_ids" {}
variable "aws_resource_identifier" {}
variable "aws_resource_identifier_supershort" {}
-variable "aws_vpc_default_id" {}
+variable "aws_allowed_sg_id" {}
+variable "aws_selected_vpc_id" {}
variable "aws_region_current_name" {}
variable "common_tags" {
type = map
default = {}
-}
-
+}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/ec2/aws_ec2.tf b/operations/deployment/terraform/modules/aws/ec2/aws_ec2.tf
index 49b0c933..f64bad9a 100644
--- a/operations/deployment/terraform/modules/aws/ec2/aws_ec2.tf
+++ b/operations/deployment/terraform/modules/aws/ec2/aws_ec2.tf
@@ -84,16 +84,17 @@ resource "local_sensitive_file" "private_key" {
// Creates an ec2 key pair using the tls_private_key.key public key
resource "aws_key_pair" "aws_key" {
- #key_name = "${var.aws_resource_identifier_supershort}-ec2kp-${random_string.random.result}"
- key_name = "${var.aws_resource_identifier_supershort}-ec2kp"
+ key_name = "${var.aws_resource_identifier_supershort}-ec2kp-${random_string.random.result}"
public_key = tls_private_key.key.public_key_openssh
}
// Creates a secret manager secret for the public key
resource "aws_secretsmanager_secret" "keys_sm_secret" {
count = var.aws_ec2_create_keypair_sm ? 1 : 0
- #name = "${var.aws_resource_identifier_supershort}-sm-${random_string.random.result}"
- name = "${var.aws_resource_identifier_supershort}-sm"
+ name = "${var.aws_resource_identifier_supershort}-sm-${random_string.random.result}"
+ lifecycle {
+ replace_triggered_by = [tls_private_key.key]
+ }
}
resource "aws_secretsmanager_secret_version" "keys_sm_secret_version" {
@@ -111,12 +112,12 @@ resource "aws_secretsmanager_secret_version" "keys_sm_secret_version" {
EOF
}
-#resource "random_string" "random" {
-# length = 5
-# lower = true
-# special = false
-# numeric = false
-#}
+resource "random_string" "random" {
+ length = 5
+ lower = true
+ special = false
+ numeric = false
+}
output "instance_public_dns" {
description = "Public DNS address of the EC2 instance"
diff --git a/operations/deployment/terraform/modules/aws/ec2_efs/aws_ec2_efs vars.tf b/operations/deployment/terraform/modules/aws/ec2_efs/aws_ec2_efs vars.tf
deleted file mode 100644
index dfc21838..00000000
--- a/operations/deployment/terraform/modules/aws/ec2_efs/aws_ec2_efs vars.tf
+++ /dev/null
@@ -1,23 +0,0 @@
-# EFS
-variable "aws_efs_create" {}
-variable "aws_efs_create_ha" {}
-variable "aws_efs_mount_id" {}
-variable "aws_efs_zone_mapping" {}
-variable "aws_efs_ec2_mount_point" {}
-# Other
-variable "ha_zone_mapping" {}
-variable "ec2_zone_mapping" {}
-# Docker
-variable "docker_efs_mount_target" {}
-# Data inputs
-variable "aws_region_current_name" {}
-variable "aws_security_group_efs_id" {}
-variable "aws_efs_fs_id" {}
-# Others
-variable "common_tags" {
- type = map
- default = {}
-}
-# Not exposed
-variable "app_install_root" {}
-variable "app_repo_name" {}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/ec2_efs/aws_ec2_efs.tf b/operations/deployment/terraform/modules/aws/ec2_efs/aws_ec2_efs.tf
deleted file mode 100644
index 0b2e2cf8..00000000
--- a/operations/deployment/terraform/modules/aws/ec2_efs/aws_ec2_efs.tf
+++ /dev/null
@@ -1,50 +0,0 @@
-locals {
- # user_zone_mapping: Create a zone mapping object list for all user specified zone_maps
- user_zone_mapping = var.aws_efs_zone_mapping != null ? ({
- for k, val in var.aws_efs_zone_mapping : "${var.aws_region_current_name}${k}" => val
- }) : local.no_zone_mapping
-
- create_ec2_efs = var.aws_efs_create || var.aws_efs_create_ha ? true : false
- # mount_target: Fall-Through variable that checks multiple layers of EFS zone map selection
- mount_target = var.aws_efs_zone_mapping != null ? local.user_zone_mapping : (var.aws_efs_create_ha ? var.ha_zone_mapping : (length(var.ec2_zone_mapping) > 0 ? var.ec2_zone_mapping : local.no_zone_mapping))
- # mount_efs: Fall-Through variable that checks multiple layers of EFS creation and if any of them are active, sets creation to active.
- mount_efs = var.aws_efs_mount_id != null ? true : (local.create_ec2_efs ? true : false)
- # create_mount_targets: boolean on whether to create mount_targets
- create_mount_targets = var.aws_efs_create || var.aws_efs_create_ha ? local.mount_target : {}
-}
-
-resource "aws_efs_mount_target" "efs_mount_target" {
- for_each = local.create_mount_targets
- file_system_id = var.aws_efs_fs_id
- subnet_id = each.value["subnet_id"]
- security_groups = [var.aws_security_group_efs_id]
-}
-
-# TODO: Add check for EFS/EFSHA vs. Provided Mount id.
-
-data "aws_efs_file_system" "mount_efs" {
- file_system_id = var.aws_efs_mount_id != null ? var.aws_efs_mount_id : var.aws_efs_fs_id
-}
-
-resource "local_file" "efs-dotenv" {
- count = local.create_ec2_efs ? 1 : 0
- filename = format("%s/%s", abspath(path.root), "efs.env")
- content = <<-EOT
-#### EFS
-HOST_DIR="${var.app_install_root}/${var.app_repo_name}/${var.aws_efs_ec2_mount_point}"
-TARGET_DIR="${var.docker_efs_mount_target}"
-EOT
-}
-
-locals {
- # no_zone_mapping: Creates a empty zone mapping object list
- no_zone_mapping = { "" : { "subnet_id" : "", "security_groups" : [""] } }
-}
-
-output "mount_efs" {
- value = local.mount_efs
-}
-
-output "efs_url" {
- value = data.aws_efs_file_system.mount_efs.dns_name
-}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/efs/aws_efs.tf b/operations/deployment/terraform/modules/aws/efs/aws_efs.tf
index 6d0fc6f6..b8a476a1 100644
--- a/operations/deployment/terraform/modules/aws/efs/aws_efs.tf
+++ b/operations/deployment/terraform/modules/aws/efs/aws_efs.tf
@@ -1,10 +1,12 @@
locals {
# replica_destination: Checks whether a replica destination exists otherwise sets a default
- replica_destination = var.aws_efs_replication_destination != null ? var.aws_efs_replication_destination : var.aws_region_current_name
+ replica_destination = var.aws_efs_replication_destination != "" ? var.aws_efs_replication_destination : data.aws_region.current.name
+ create_efs = var.aws_efs_create ? true : (var.aws_efs_create_ha ? true : false)
}
# ---------------------CREATE--------------------------- #
resource "aws_efs_file_system" "efs" {
+ count = local.create_efs ? 1 : 0
# File system
creation_token = "${var.aws_resource_identifier}-token-modular"
encrypted = true
@@ -18,9 +20,13 @@ resource "aws_efs_file_system" "efs" {
}
}
+data "aws_efs_file_system" "efs" {
+ file_system_id = local.create_efs ? aws_efs_file_system.efs[0].id : var.aws_efs_fs_id
+}
+
resource "aws_efs_backup_policy" "efs_policy" {
- count = var.aws_efs_enable_backup_policy ? 1 : 0
- file_system_id = aws_efs_file_system.efs.id
+ count = local.create_efs && var.aws_efs_enable_backup_policy ? 1 : 0
+ file_system_id = data.aws_efs_file_system.efs.id
backup_policy {
status = "ENABLED"
@@ -28,18 +34,57 @@ resource "aws_efs_backup_policy" "efs_policy" {
}
resource "aws_efs_replication_configuration" "efs_rep_config" {
- count = var.aws_efs_create_replica ? 1 : 0
- source_file_system_id = aws_efs_file_system.efs.id
+ count = local.create_efs && var.aws_efs_create_replica ? 1 : 0
+ source_file_system_id = data.aws_efs_file_system.efs.id
destination {
region = local.replica_destination
}
}
-resource "aws_security_group" "efs_security_group" {
- name = var.aws_efs_security_group_name != "" ? var.aws_efs_security_group_name : "SG for ${var.aws_resource_identifier} - EFS"
- description = "SG for ${var.aws_resource_identifier} - EFS"
- vpc_id = var.aws_vpc_id
+#### Defined EFS - Set up directly through the action
+resource "aws_security_group" "efs_security_group_defined" { # Incoming from EFS value
+ count = local.incoming_set ? 1 : 0
+ name = var.aws_efs_security_group_name != "" ? var.aws_efs_security_group_name : "SG for ${var.aws_resource_identifier} - EFS - Defined"
+ description = "SG for ${var.aws_resource_identifier} - EFS - Defined"
+ vpc_id = local.incoming_vpc
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ tags = {
+ Name = "${var.aws_resource_identifier}-efs-sg-def"
+ }
+}
+
+resource "aws_security_group_rule" "efs_nfs_incoming_ports_defined" { # Incoming from EFS value
+ count = local.incoming_set ? 1 : 0
+ type = "ingress"
+ description = "NFS from VPC"
+ from_port = 2049
+ to_port = 2049
+ protocol = "tcp"
+ cidr_blocks = [data.aws_vpc.incoming[0].cidr_block]
+ security_group_id = aws_security_group.efs_security_group_defined[0].id
+ depends_on = [ aws_security_group.efs_security_group_defined ]
+}
+
+resource "aws_efs_mount_target" "efs_mount_target_incoming" {
+ count = length(local.incoming_subnets)
+ file_system_id = data.aws_efs_file_system.efs.id
+ subnet_id = local.incoming_subnets[count.index]
+ security_groups = [aws_security_group.efs_security_group_defined[0].id]
+}
+####
+
+#### Action SG. Rules and Mount
+resource "aws_security_group" "efs_security_group_action" {
+ count = local.defined_set ? 1 : 0
+ name = var.aws_efs_security_group_name != "" ? var.aws_efs_security_group_name : "SG for ${var.aws_resource_identifier} - EFS - Action defined"
+ description = "SG for ${var.aws_resource_identifier} - EFS - Action defined"
+ vpc_id = var.aws_selected_vpc_id
egress {
from_port = 0
to_port = 0
@@ -47,24 +92,127 @@ resource "aws_security_group" "efs_security_group" {
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
- Name = "${var.aws_resource_identifier}-efs-sg"
+ Name = "${var.aws_resource_identifier}-efs-sg-act"
}
}
-resource "aws_security_group_rule" "efs_nfs_incoming_ports" {
+
+resource "aws_security_group_rule" "efs_nfs_incoming_ports_action" { # Selected from VPC Module
+ count = local.defined_set ? 1 : 0
type = "ingress"
description = "NFS from VPC"
from_port = 2049
to_port = 2049
protocol = "tcp"
- cidr_blocks = [var.aws_vpc_cidr_block_whitelist]
- security_group_id = aws_security_group.efs_security_group.id
+ cidr_blocks = [data.aws_vpc.selected[0].cidr_block]
+ security_group_id = aws_security_group.efs_security_group_action[0].id
+ depends_on = [ aws_security_group.efs_security_group_action ]
+}
+
+resource "aws_efs_mount_target" "efs_mount_target_action" {
+ count = length(local.module_subnets)
+ file_system_id = data.aws_efs_file_system.efs.id
+ subnet_id = local.module_subnets[count.index]
+ security_groups = [aws_security_group.efs_security_group_action[0].id]
}
-output "aws_security_group_efs_id" {
- value = aws_security_group.efs_security_group.id
+######
+# Data sources from selected (Coming from VPC module)
+
+data "aws_subnets" "selected_vpc_id" {
+ #for_each = var.aws_selected_vpc_id != null ? toset(data.aws_availability_zones.all.zone_ids) : []
+ count = var.aws_selected_vpc_id != null ? length(var.aws_selected_az_list) : 0
+ filter {
+ name = "vpc-id"
+ values = [var.aws_selected_vpc_id]
+ }
+ filter {
+ name = "availability-zone-id"
+ values = [var.aws_selected_az_list[count.index]]
+ #values = ["${each.value}"]
+ }
+}
+
+data "aws_vpc" "selected" {
+ count = var.aws_selected_vpc_id != null ? 1 : 0
+ id = var.aws_selected_vpc_id
+}
+
+# Data sources from EFS inputs
+
+data "aws_subnets" "incoming_vpc" {
+ #for_each = local.incoming_set ? toset(data.aws_availability_zones.all.zone_ids) : []
+ count = local.incoming_set ? length(var.aws_selected_az_list) : 0
+ filter {
+ name = "vpc-id"
+ values = [local.incoming_vpc]
+ }
+ filter {
+ name = "availability-zone-id"
+ values = [var.aws_selected_az_list[count.index]]
+ #values = ["${each.value}"]
+ }
+}
+
+data "aws_vpc" "incoming" {
+ count = local.incoming_set ? 1 : 0
+ id = local.incoming_vpc
+}
+
+data "aws_subnet" "incoming" {
+ count = var.aws_efs_subnet_ids != null ? 1 : 0
+ id = local.aws_efs_subnet_ids[0]
+}
+
+#####
+
+## Now to get the details of VPCs and subnets
+
+# If HA is disabled and no subnet ID was provided, look up the subnet in the action-defined AZ, filtered by VPC. If none or more than one is found, this lookup fails.
+data "aws_subnet" "no_ha" {
+ count = var.aws_efs_create_ha ? 0 : local.incoming_vpc != null && var.aws_efs_subnet_ids == null ? 1 : 0
+ filter {
+ name = "vpc-id"
+ values = [local.incoming_vpc]
+ }
+ availability_zone = var.aws_selected_az
+}
+
+# If one or more subnets were provided, use the first one to resolve its VPC ID.
+data "aws_subnet" "incoming_subnet" {
+ count = var.aws_efs_subnet_ids != null ? 1 : 0
+ id = local.aws_efs_subnet_ids[0]
+}
+
+####
+
+data "aws_region" "current" {}
+
+locals {
+ ### Incoming definitions: an EFS VPC or subnet must be provided; if neither is set, incoming_set is false.
+ incoming_set = var.aws_efs_vpc_id != null || var.aws_efs_subnet_ids != null ? true : false
+ #defined_set = var.aws_selected_vpc_id != null || var.aws_selected_subnet_id != null ? true : false
+ defined_set = true # Always true: if a VPC is not being created, an existing one or the default one is used.
+ # Convert incoming subnets to list
+ aws_efs_subnet_ids = var.aws_efs_subnet_ids != null ? [for n in split(",", var.aws_efs_subnet_ids) : (n)] : []
+ ###
+
+ # Define the incoming VPC ID - use the defined var if set; otherwise resolve it from the first incoming subnet.
+ incoming_vpc = var.aws_efs_vpc_id != null ? var.aws_efs_vpc_id : var.aws_efs_subnet_ids != null ? data.aws_subnet.incoming_subnet[0].vpc_id : null
+ # Make a list with the subnets defined in the action - From the VPC
+ incoming_vpc_ids = compact([for k, v in data.aws_subnets.incoming_vpc : try((v.ids[0]),null)])
+ incoming_subnets_from_vpc = var.aws_efs_create_ha ? local.incoming_vpc_ids : try([data.aws_subnet.no_ha[0].id],[]) # One or all subnets.
+ #incoming_subnets_from_vpc = var.aws_efs_create_ha ? try(data.aws_subnets.incoming_vpc[0].ids,[]) : try([data.aws_subnet.no_ha[0].id],[]) # One or all subnets.
+ # If subnets were provided, use them as a list; otherwise take the subnets from the VPC (a single subnet without HA, or the whole set).
+ incoming_subnets = var.aws_efs_subnet_ids != null ? local.aws_efs_subnet_ids : local.incoming_subnets_from_vpc
+
+ # Subnets selected by the VPC module
+ module_vpc_ids = compact([for k, v in data.aws_subnets.selected_vpc_id : try((v.ids[0]),null)])
+ #module_subnets = var.aws_efs_create_ha ? local.module_vpc_ids : try([var.aws_selected_subnet_id],[])
+ module_subnets = var.aws_efs_create_ha ? local.module_vpc_ids : [var.aws_selected_subnet_id]
+ #module_subnets = var.aws_efs_create_ha ? try(data.aws_subnets.selected_vpc_id[0].ids,[]) : try([var.aws_selected_subnet_id],[])
}
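+
+# Examples (illustrative values):
+#   aws_efs_subnet_ids = "subnet-aaa,subnet-bbb"           -> incoming_subnets = ["subnet-aaa", "subnet-bbb"]
+#   only aws_efs_vpc_id set, aws_efs_create_ha = true      -> incoming_subnets = one subnet per selected AZ of that VPC
+#   only aws_efs_vpc_id set, aws_efs_create_ha = false     -> incoming_subnets = the single subnet found in the action-defined AZ
+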
output "aws_efs_fs_id" {
- value = aws_efs_file_system.efs.id
+ value = data.aws_efs_file_system.efs.id
}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/efs/aws_efs_vars.tf b/operations/deployment/terraform/modules/aws/efs/aws_efs_vars.tf
index a68eb54d..1fcfc108 100644
--- a/operations/deployment/terraform/modules/aws/efs/aws_efs_vars.tf
+++ b/operations/deployment/terraform/modules/aws/efs/aws_efs_vars.tf
@@ -1,15 +1,18 @@
-#EFS
-variable "aws_efs_replication_destination" {}
-variable "aws_efs_transition_to_inactive" {}
+variable "aws_efs_create" {}
+variable "aws_efs_create_ha" {}
+variable "aws_efs_fs_id" {}
+variable "aws_efs_vpc_id" {}
+variable "aws_efs_subnet_ids" {}
variable "aws_efs_security_group_name" {}
-variable "aws_efs_enable_backup_policy" {}
variable "aws_efs_create_replica" {}
-# EC2
-variable "aws_ec2_instance_create" {}
+variable "aws_efs_replication_destination" {}
+variable "aws_efs_enable_backup_policy" {}
+variable "aws_efs_transition_to_inactive" {}
# VPC inputs
-variable "aws_vpc_id" {}
-variable "aws_vpc_cidr_block_whitelist" {}
-variable "aws_region_current_name" {}
+variable "aws_selected_vpc_id" {}
+variable "aws_selected_subnet_id" {}
+variable "aws_selected_az" {}
+variable "aws_selected_az_list" {}
# Others
variable "aws_resource_identifier" {}
variable "common_tags" {
diff --git a/operations/deployment/terraform/modules/aws/elb/aws_elb.tf b/operations/deployment/terraform/modules/aws/elb/aws_elb.tf
index e9aaf2b9..137fee11 100644
--- a/operations/deployment/terraform/modules/aws/elb/aws_elb.tf
+++ b/operations/deployment/terraform/modules/aws/elb/aws_elb.tf
@@ -46,6 +46,7 @@ resource "aws_security_group_rule" "incoming_elb" {
resource "aws_security_group" "elb_security_group" {
name = var.aws_elb_security_group_name != "" ? var.aws_elb_security_group_name : "SG for ${var.aws_resource_identifier} - ELB"
description = "SG for ${var.aws_resource_identifier} - ELB"
+ vpc_id = var.aws_vpc_selected_id
egress {
from_port = 0
to_port = 0
@@ -72,10 +73,8 @@ resource "aws_security_group_rule" "incoming_elb_ports" {
resource "aws_elb" "vm_lb" {
name = var.aws_resource_identifier_supershort
security_groups = [aws_security_group.elb_security_group.id]
- availability_zones = var.aws_instance_server_az
- # TODO - ADD VPC Handling
- # availability_zones = var.create_vpc == "true" ? null : [aws_instance.server.availability_zone]
- # subnets = var.create_vpc == "true" ? aws_subnet.public.*.id : null
+ #availability_zones = var.aws_instance_server_az
+ subnets = [var.aws_vpc_subnet_selected]
access_logs {
bucket = aws_s3_bucket.lb_access_logs.id
@@ -130,7 +129,7 @@ locals {
# Transform CSV values into arrays. ( Now variables will be called local.xx instead of var.xx )
aws_elb_listen_port = var.aws_elb_listen_port != "" ? [for n in split(",", var.aws_elb_listen_port) : tonumber(n)] : ( local.elb_ssl_available ? [443] : [80] )
aws_elb_listen_protocol = var.aws_elb_listen_protocol != "" ? [for n in split(",", var.aws_elb_listen_protocol) : (n)] : ( local.elb_ssl_available ? ["ssl"] : ["tcp"] )
- aws_elb_app_port = var.aws_elb_app_port != "" ? [for n in split(",", var.aws_elb_app_port) : tonumber(n)] : []
+ aws_elb_app_port = var.aws_elb_app_port != "" ? [for n in split(",", var.aws_elb_app_port) : tonumber(n)] : var.aws_elb_listen_port != "" ? local.aws_elb_listen_port : [3000]
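+ # Example (illustrative values): aws_elb_app_port = "" and aws_elb_listen_port = "443,80" -> app ports fall back to [443, 80]; with both empty -> [3000].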
aws_elb_app_protocol = var.aws_elb_app_protocol != "" ? [for n in split(",", var.aws_elb_app_protocol) : (n)] : []
# Store the lowest array length. (aws_elb_app_port will be at least 3000)
diff --git a/operations/deployment/terraform/modules/aws/elb/aws_elb_vars.tf b/operations/deployment/terraform/modules/aws/elb/aws_elb_vars.tf
index 01272124..3e9e1848 100644
--- a/operations/deployment/terraform/modules/aws/elb/aws_elb_vars.tf
+++ b/operations/deployment/terraform/modules/aws/elb/aws_elb_vars.tf
@@ -8,6 +8,8 @@ variable "aws_elb_healthcheck" {}
variable "lb_access_bucket_name" {}
variable "aws_instance_server_az" {}
+variable "aws_vpc_selected_id" {}
+variable "aws_vpc_subnet_selected" {}
variable "aws_instance_server_id" {}
variable "aws_certificates_selected_arn" {}
variable "aws_elb_target_sg_id" {}
diff --git a/operations/deployment/terraform/modules/aws/secretmanager_get/aws_secretmanager_get.tf b/operations/deployment/terraform/modules/aws/secretmanager_get/aws_secretmanager_get.tf
new file mode 100644
index 00000000..3229574a
--- /dev/null
+++ b/operations/deployment/terraform/modules/aws/secretmanager_get/aws_secretmanager_get.tf
@@ -0,0 +1,15 @@
+# This file creates a key=value file from an AWS secret stored in AWS Secrets Manager.
+# The secret is expected to be a JSON object such as {"key1":"value1","key2":"value2"}.
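+# For that JSON, the generated aws.env would contain:
+#   key1="value1"
+#   key2="value2"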
+data "aws_secretsmanager_secret_version" "env_secret" {
+ secret_id = var.env_aws_secret
+}
+
+resource "local_file" "tf-secretdotenv" {
+ filename = format("%s/%s", abspath(path.root), "aws.env")
+ content = "${local.secret_string}\n"
+}
+
+locals {
+ secret_raw = nonsensitive(jsondecode(data.aws_secretsmanager_secret_version.env_secret.secret_string))
+ secret_string = join("\n", [for k, v in local.secret_raw : "${k}=\"${v}\""])
+}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/secretmanager_get/aws_secretmanager_get_vars.tf b/operations/deployment/terraform/modules/aws/secretmanager_get/aws_secretmanager_get_vars.tf
new file mode 100644
index 00000000..a06f9e6f
--- /dev/null
+++ b/operations/deployment/terraform/modules/aws/secretmanager_get/aws_secretmanager_get_vars.tf
@@ -0,0 +1 @@
+variable "env_aws_secret" {}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/vpc/aws_vpc.tf b/operations/deployment/terraform/modules/aws/vpc/aws_vpc.tf
new file mode 100644
index 00000000..973a2a82
--- /dev/null
+++ b/operations/deployment/terraform/modules/aws/vpc/aws_vpc.tf
@@ -0,0 +1,154 @@
+#### VPC DEFAULT
+
+data "aws_vpc" "default" {
+ count = var.aws_vpc_create ? 0 : var.aws_vpc_id != "" ? 0 : 1
+ default = true
+}
+
+#### VPC IMPORT
+
+data "aws_vpc" "exisiting" {
+ count = var.aws_vpc_create ? 0 : var.aws_vpc_id != "" ? 1 : 0
+ id = var.aws_vpc_id
+}
+
+#### VPC CREATE
+
+resource "aws_vpc" "main" {
+ count = var.aws_vpc_create ? 1 : 0
+ cidr_block = var.aws_vpc_cidr_block
+ enable_dns_hostnames = "true"
+ tags = {
+ Name = var.aws_vpc_name != "" ? var.aws_vpc_name : "VPC for ${var.aws_resource_identifier}"
+ }
+}
+
+### Private
+
+ resource "aws_subnet" "private" {
+ count = var.aws_vpc_create ? length(local.aws_vpc_private_subnets) : 0
+ vpc_id = aws_vpc.main[0].id
+ cidr_block = element(local.aws_vpc_private_subnets, count.index)
+ availability_zone = element(local.aws_vpc_availability_zones, count.index)
+
+ tags = {
+ Name = "${var.aws_resource_identifier}-private${count.index + 1}"
+ Tier = "Private"
+ }
+}
+
+resource "aws_route_table" "private" {
+ count = var.aws_vpc_create ? 1 : 0
+ vpc_id = aws_vpc.main[0].id
+ tags = {
+ Name = "${var.aws_resource_identifier}-private"
+ }
+ depends_on = [ aws_vpc.main ]
+}
+
+resource "aws_route_table_association" "private" {
+ count = var.aws_vpc_create ? length(local.aws_vpc_private_subnets) : 0
+ subnet_id = element(aws_subnet.private.*.id, count.index)
+ route_table_id = aws_route_table.private[0].id
+}
+
+### Public
+
+resource "aws_subnet" "public" {
+ count = var.aws_vpc_create ? length(local.aws_vpc_public_subnets) : 0
+ vpc_id = aws_vpc.main[0].id
+ cidr_block = element(local.aws_vpc_public_subnets, count.index)
+ availability_zone = element(local.aws_vpc_availability_zones, count.index)
+ map_public_ip_on_launch = true
+
+ tags = {
+ Name = "${var.aws_resource_identifier}-public${count.index + 1}"
+ Tier = "Public"
+ }
+ depends_on = [ aws_vpc.main ]
+}
+
+resource "aws_route_table" "public" {
+ count = var.aws_vpc_create ? 1 : 0
+ vpc_id = aws_vpc.main[0].id
+
+ tags = {
+ Name = "${var.aws_resource_identifier}-public"
+ }
+ depends_on = [ aws_vpc.main ]
+}
+
+resource "aws_route_table_association" "public" {
+ count = var.aws_vpc_create ? length(local.aws_vpc_public_subnets) : 0
+ subnet_id = element(aws_subnet.public.*.id, count.index)
+ route_table_id = aws_route_table.public[0].id
+}
+
+resource "aws_internet_gateway" "gw" {
+ count = var.aws_vpc_create ? 1 : 0
+ vpc_id = aws_vpc.main[0].id
+ depends_on = [ aws_vpc.main ]
+}
+resource "aws_route" "public" {
+ count = var.aws_vpc_create ? 1 : 0
+ route_table_id = aws_route_table.public[0].id
+ destination_cidr_block = "0.0.0.0/0"
+ gateway_id = aws_internet_gateway.gw[0].id
+}
+
+### Data source
+
+locals {
+ aws_vpc_public_subnets = var.aws_vpc_public_subnets != "" ? [for n in split(",", var.aws_vpc_public_subnets) : (n)] : []
+ aws_vpc_private_subnets = var.aws_vpc_private_subnets != "" ? [for n in split(",", var.aws_vpc_private_subnets) : (n)] : []
+ aws_vpc_availability_zones = var.aws_vpc_availability_zones != "" ? [for n in split(",", var.aws_vpc_availability_zones) : (n)] : local.reordered_availability_zones # data.aws_availability_zones.all.names
+ selected_vpc_id = var.aws_vpc_create ? aws_vpc.main[0].id : var.aws_vpc_id != "" ? var.aws_vpc_id : data.aws_vpc.default[0].id
+}
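+# Precedence for selected_vpc_id: a newly created VPC, then the VPC passed through aws_vpc_id, then the account's default VPC.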
+
+# Get the VPC details
+data "aws_vpc" "selected" {
+ id = local.selected_vpc_id
+}
+
+# Sort the AZ list, and ensure that the az from the existing EC2 instance is first in the list
+
+locals {
+ sorted_availability_zones = sort(data.aws_availability_zones.all.names)
+ index_of_existing_az = index(local.sorted_availability_zones, local.aws_ec2_zone_selected)
+
+ before_existing_az = local.index_of_existing_az == 0 ? [] : slice(local.sorted_availability_zones, 0, local.index_of_existing_az)
+ after_existing_az = local.index_of_existing_az == length(local.sorted_availability_zones) -1 ? [] : slice(local.sorted_availability_zones, local.index_of_existing_az + 1, length(local.sorted_availability_zones))
+
+ reordered_availability_zones = concat(
+ [element(local.sorted_availability_zones, local.index_of_existing_az)],
+ local.before_existing_az,
+ local.after_existing_az
+ )
+}
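+
+# Example (illustrative values): sorted zones ["us-east-1a", "us-east-1b", "us-east-1c"] with the EC2 instance in "us-east-1b"
+# yield reordered_availability_zones = ["us-east-1b", "us-east-1a", "us-east-1c"].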
+
+### Outputs
+
+output "aws_selected_vpc_id" {
+ description = "The subnet ids from the default vpc"
+ #value = local.selected_vpc_id
+ value = var.aws_vpc_create ? aws_vpc.main[0].id : var.aws_vpc_id != "" ? var.aws_vpc_id : data.aws_vpc.default[0].id
+}
+
+output "aws_selected_vpc_subnets" {
+ description = "The subnet ids from the default vpc"
+ value = data.aws_subnets.vpc_subnets.ids
+}
+
+output "aws_vpc_subnet_selected" {
+ value = local.use_default ? data.aws_subnet.default_selected[0].id : data.aws_subnet.selected[0].id
+}
+
+output "aws_region_current_name" {
+ description = "Current region name"
+ value = data.aws_region.current.name
+}
+
+output "aws_vpc_cidr_block" {
+ description = "CIDR block of chosen VPC"
+ value = data.aws_vpc.selected.cidr_block
+}
\ No newline at end of file
diff --git a/operations/deployment/terraform/aws/aws_default_azs.tf b/operations/deployment/terraform/modules/aws/vpc/aws_vpc_azs.tf
similarity index 58%
rename from operations/deployment/terraform/aws/aws_default_azs.tf
rename to operations/deployment/terraform/modules/aws/vpc/aws_vpc_azs.tf
index 2ebb50e0..d6157d86 100644
--- a/operations/deployment/terraform/aws/aws_default_azs.tf
+++ b/operations/deployment/terraform/modules/aws/vpc/aws_vpc_azs.tf
@@ -1,40 +1,57 @@
# All regions have "a", skipping az validation
-data "aws_availability_zones" "all" {}
+data "aws_region" "current" {}
+
+data "aws_availability_zones" "all" {
+ filter {
+ name = "region-name"
+ values = [data.aws_region.current.name]
+ }
+}
+
+data "aws_subnets" "vpc_subnets" {
+ filter {
+ name = "vpc-id"
+ values = [local.selected_vpc_id]
+ }
+}
data "aws_subnet" "defaulta" {
+ count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}a") && local.use_default ? 1 : 0
availability_zone = "${data.aws_region.current.name}a"
default_for_az = true
}
data "aws_subnet" "defaultb" {
- count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}b") ? 1 : 0
+ count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}b") && local.use_default ? 1 : 0
availability_zone = "${data.aws_region.current.name}b"
default_for_az = true
}
data "aws_subnet" "defaultc" {
- count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}c") ? 1 : 0
+ count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}c") && local.use_default ? 1 : 0
availability_zone = "${data.aws_region.current.name}c"
default_for_az = true
}
data "aws_subnet" "defaultd" {
- count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}d") ? 1 : 0
+ count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}d") && local.use_default ? 1 : 0
availability_zone = "${data.aws_region.current.name}d"
default_for_az = true
}
data "aws_subnet" "defaulte" {
- count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}e") ? 1 : 0
+ count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}e") && local.use_default ? 1 : 0
availability_zone = "${data.aws_region.current.name}e"
default_for_az = true
}
data "aws_subnet" "defaultf" {
- count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}f") ? 1 : 0
+ count = contains(data.aws_availability_zones.all.names, "${data.aws_region.current.name}f") && local.use_default ? 1 : 0
availability_zone = "${data.aws_region.current.name}f"
default_for_az = true
}
locals {
+ use_default = var.aws_vpc_create ? false : var.aws_vpc_id != "" ? false : true
aws_ec2_instance_type_offerings = sort(data.aws_ec2_instance_type_offerings.region_azs.locations)
- preferred_az = var.availability_zone != null ? var.availability_zone : local.aws_ec2_instance_type_offerings[random_integer.az_select[0].result]
+ aws_ec2_zone_selected = local.aws_ec2_instance_type_offerings[random_integer.az_select[0].result]
+ preferred_az = var.aws_vpc_availability_zones != "" ? local.aws_vpc_availability_zones[0] : var.aws_vpc_id != "" ? data.aws_subnet.selected[0].availability_zone : local.aws_ec2_zone_selected
}
data "aws_ec2_instance_type_offerings" "region_azs" {
@@ -45,11 +62,6 @@ data "aws_ec2_instance_type_offerings" "region_azs" {
location_type = "availability-zone"
}
-data "aws_subnet" "selected" {
- count = contains(data.aws_availability_zones.all.names, local.preferred_az) ? 1 : 0
- availability_zone = local.preferred_az
- default_for_az = true
-}
resource "random_integer" "az_select" {
count = length(data.aws_ec2_instance_type_offerings.region_azs.locations) > 0 ? 1 : 0
@@ -62,23 +74,47 @@ resource "random_integer" "az_select" {
}
}
+data "aws_subnet" "default_selected" {
+ count = local.use_default ? contains(data.aws_availability_zones.all.names, local.preferred_az) ? 1 : 0 : 0
+ availability_zone = local.preferred_az
+ default_for_az = true # TODO: what happens if there are multiple subnets in the same AZ?
+}
+
+data "aws_subnet" "selected" {
+ count = local.use_default ? 0 : 1
+ id = var.aws_vpc_subnet_id != "" ? var.aws_vpc_subnet_id : var.aws_vpc_create ? aws_subnet.public[0].id : data.aws_subnets.vpc_subnets.ids[0]
+}
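+# Precedence for the selected subnet: aws_vpc_subnet_id if provided, else the first public subnet when creating the VPC, else the first subnet of the existing VPC.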
+
+data "aws_security_group" "default" {
+ filter {
+ name = "group-name"
+ values = ["default"]
+ }
+ filter {
+ name = "vpc-id"
+ values = [local.selected_vpc_id]
+ }
+}
+
+output "aws_security_group_default_id" {
+ description = "The AWS Default SG Id"
+ value = data.aws_security_group.default.id
+}
+
output "instance_type_available" {
value = length(data.aws_ec2_instance_type_offerings.region_azs.locations) > 0 ? "EC2 Instance type valid for this region" : "EC2 Instance type invalid for this region."
}
###
locals {
- # no_zone_mapping: Creates a empty zone mapping object list
- no_zone_mapping = { "" : { "subnet_id" : "", "security_groups" : [""] } }
- # ec2_zone_mapping: Creates a zone mapping object list based on default values (default sg, default subnet, etc)
- ec2_zone_mapping = { "${local.preferred_az}" : { "subnet_id" : "${data.aws_subnet.selected[0].id}", "security_groups" : try([module.ec2[0].aws_security_group_ec2_sg_name],[""]) } }
-
+ aws_ec2_security_group_name = var.aws_ec2_security_group_name != "" ? var.aws_ec2_security_group_name : "SG for ${var.aws_resource_identifier} - EC2"
# auto_ha_availability_zone*: Creates zone map objects for each available AZ in a region
- auto_ha_availability_zonea = {
+ auto_ha_availability_zonea = length(data.aws_subnet.defaulta) > 0 ? ({
"${data.aws_region.current.name}a" : {
- "subnet_id" : data.aws_subnet.defaulta.id,
+ "subnet_id" : data.aws_subnet.defaulta[0].id,
"security_groups" : [data.aws_security_group.default.id]
- } }
+ }
+ }) : null
auto_ha_availability_zoneb = length(data.aws_subnet.defaultb) > 0 ? ({
"${data.aws_region.current.name}b" : {
"subnet_id" : data.aws_subnet.defaultb[0].id,
@@ -109,14 +145,28 @@ locals {
"security_groups" : [data.aws_security_group.default.id]
}
}) : null
+ chosen_subnet_id = try(data.aws_subnet.default_selected[0].id, data.aws_subnets.vpc_subnets.ids[0], aws_subnet.public[0].id)
# ha_zone_mapping: Creates a zone mapping object list for all available AZs in a region
ha_zone_mapping = merge(local.auto_ha_availability_zonea, local.auto_ha_availability_zoneb, local.auto_ha_availability_zonec, local.auto_ha_availability_zoned, local.auto_ha_availability_zonee, local.auto_ha_availability_zonef)
+ ec2_zone_mapping = { "${local.preferred_az}" : { "subnet_id" : "${local.chosen_subnet_id}", "security_groups" : ["${local.aws_ec2_security_group_name}"] } }
+}
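+
+# Example shape (illustrative values):
+# ec2_zone_mapping = { "us-east-1a" = { subnet_id = "subnet-0123456789abcdef0", security_groups = ["SG for my-app - EC2"] } }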
+
+output "ha_zone_mapping" {
+ value = local.ha_zone_mapping
+}
+
+output "ec2_zone_mapping" {
+ value = local.ec2_zone_mapping
+}
+
+output "preferred_az" {
+ value = local.preferred_az
}
-#output "ec2_zone_mapping" {
-# value = local.ec2_zone_mapping
-#}
+output "aws_subnets" {
+ value = data.aws_subnets.vpc_subnets
+}
-#output "ha_zone_mapping" {
-# value = local.ha_zone_mapping
-#}
\ No newline at end of file
+output "availability_zones" {
+ value = data.aws_availability_zones.all.zone_ids
+}
\ No newline at end of file
diff --git a/operations/deployment/terraform/modules/aws/vpc/aws_vpc_vars.tf b/operations/deployment/terraform/modules/aws/vpc/aws_vpc_vars.tf
new file mode 100644
index 00000000..dc4027f2
--- /dev/null
+++ b/operations/deployment/terraform/modules/aws/vpc/aws_vpc_vars.tf
@@ -0,0 +1,15 @@
+variable "aws_vpc_create" {}
+variable "aws_vpc_id" {}
+variable "aws_vpc_subnet_id" {}
+variable "aws_vpc_cidr_block" {}
+variable "aws_vpc_name" {}
+variable "aws_vpc_public_subnets" {}
+variable "aws_vpc_private_subnets" {}
+variable "aws_vpc_availability_zones" {}
+variable "aws_ec2_instance_type" {}
+variable "aws_ec2_security_group_name" {}
+variable "aws_resource_identifier" {}
+variable "common_tags" {
+ type = map
+ default = {}
+}
\ No newline at end of file