diff --git a/1_private_network/ecr.tf b/1_private_network/ecr.tf
deleted file mode 100644
index 935156d..0000000
--- a/1_private_network/ecr.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-resource "aws_ecr_repository" "ocp4" {
-  count                = var.airgapped ? 1 : 0
-  name                 = local.infrastructure_id
-  image_tag_mutability = "MUTABLE"
-
-  image_scanning_configuration {
-    scan_on_push = true
-  }
-}
diff --git a/1_private_network/output.tf b/1_private_network/output.tf
deleted file mode 100644
index 9c9cbdc..0000000
--- a/1_private_network/output.tf
+++ /dev/null
@@ -1,23 +0,0 @@
-output "infrastructure_id" {
-  value = local.infrastructure_id
-}
-
-output "clustername" {
-  value = var.clustername
-}
-
-output "private_vpc_id" {
-  value = aws_vpc.ocp_vpc.id
-}
-
-output "private_vpc_private_subnet_ids" {
-  value = aws_subnet.ocp_pri_subnet.*.id
-}
-
-#output "private_vpc_public_subnet_ids" {
-#  value = aws_subnet.ocp_pub_subnet.*.id
-#}
-
-output "private_ecr_repository_url" {
-  value = var.airgapped ? aws_ecr_repository.ocp4.0.name : ""
-}
diff --git a/1_private_network/provider.tf b/1_private_network/provider.tf
deleted file mode 100644
index cbb295b..0000000
--- a/1_private_network/provider.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-provider "aws" {
-  region = var.aws_region
-}
-
-data "aws_caller_identity" "current" {
-}
-
-resource "random_id" "clusterid" {
-  byte_length = "2"
-}
-
-locals {
-  infrastructure_id = "${var.infrastructure_id != "" ? "${var.infrastructure_id}" : "${var.clustername}-${random_id.clusterid.hex}"}"
-}
diff --git a/1_private_network/security_group.tf b/1_private_network/security_group.tf
deleted file mode 100644
index 4596ba6..0000000
--- a/1_private_network/security_group.tf
+++ /dev/null
@@ -1,77 +0,0 @@
-resource "aws_security_group" "private_ec2_api" {
-  name   = "${local.infrastructure_id}-ec2-api"
-  vpc_id = aws_vpc.ocp_vpc.id
-
-  tags = merge(
-    var.default_tags,
-    map(
-      "Name", "${local.infrastructure_id}-private-ec2-api",
-    )
-  )
-}
-
-# allow anybody in the VPC to talk to ec2 through the private endpoint
-resource "aws_security_group_rule" "private_ec2_ingress" {
-  type = "ingress"
-
-  from_port = 0
-  to_port   = 65535
-  protocol  = "tcp"
-  cidr_blocks = [
-    var.vpc_cidr
-  ]
-
-  security_group_id = aws_security_group.private_ec2_api.id
-}
-
-resource "aws_security_group_rule" "private_ec2_api_egress" {
-  type = "egress"
-
-  from_port = 0
-  to_port   = 0
-  protocol  = "all"
-  cidr_blocks = [
-    "0.0.0.0/0"
-  ]
-
-  security_group_id = aws_security_group.private_ec2_api.id
-}
-
-resource "aws_security_group" "private_ecr_api" {
-  name   = "${local.infrastructure_id}-ecr-api"
-  vpc_id = aws_vpc.ocp_vpc.id
-
-  tags = merge(
-    var.default_tags,
-    map(
-      "Name", "${local.infrastructure_id}-private-ecr-api",
-    )
-  )
-}
-
-# allow anybody in the VPC to talk to ecr through the private endpoint
-resource "aws_security_group_rule" "private_ecr_ingress" {
-  type = "ingress"
-
-  from_port = 0
-  to_port   = 65535
-  protocol  = "tcp"
-  cidr_blocks = [
-    var.vpc_cidr
-  ]
-
-  security_group_id = aws_security_group.private_ecr_api.id
-}
-
-resource "aws_security_group_rule" "private_ecr_api_egress" {
-  type = "egress"
-
-  from_port = 0
-  to_port   = 0
-  protocol  = "all"
-  cidr_blocks = [
-    "0.0.0.0/0"
-  ]
-
-  security_group_id = aws_security_group.private_ecr_api.id
-}
diff --git a/1_private_network/subnet.tf b/1_private_network/subnet.tf
deleted file mode 100644
index 996776b..0000000
--- a/1_private_network/subnet.tf
+++ /dev/null
@@ -1,238 +0,0 @@
-# Create private subnet in each AZ for the dmz
-resource "aws_subnet" "ocp_pri_subnet" {
-  count = length(var.aws_azs)
-
-  vpc_id            = aws_vpc.ocp_vpc.id
-  cidr_block        = element(var.vpc_private_subnet_cidrs, count.index)
-  availability_zone = format("%s%s", element(list(var.aws_region), count.index), element(var.aws_azs, count.index))
-
-  tags = merge(
-    var.default_tags,
-    map(
-      "Name", format("${local.infrastructure_id}-pub-%s-pri", format("%s%s", element(list(var.aws_region), count.index), element(var.aws_azs, count.index))),
-      "kubernetes.io/cluster/${local.infrastructure_id}", "shared"
-    )
-  )
-}
-
-resource "aws_route_table" "ocp_pri_net_route_table" {
-  count = length(var.aws_azs)
-
-  vpc_id = aws_vpc.ocp_vpc.id
-
-  tags = merge(
-    var.default_tags,
-    map(
-      "Name", format("${local.infrastructure_id}-pub-rtbl-%s-pri", format("%s%s", element(list(var.aws_region), count.index), element(var.aws_azs, count.index))),
-      "kubernetes.io/cluster/${local.infrastructure_id}", "shared")
-  )
-}
-
-resource "aws_route_table_association" "ocp_pri_net_route_table_assoc" {
-  count = length(var.aws_azs)
-
-  subnet_id      = element(aws_subnet.ocp_pri_subnet.*.id, count.index)
-  route_table_id = element(aws_route_table.ocp_pri_net_route_table.*.id, count.index)
-}
-
-# private S3 endpoint
-data "aws_vpc_endpoint_service" "s3" {
-  service = "s3"
-}
-
-resource "aws_vpc_endpoint" "private_s3" {
-  vpc_id       = aws_vpc.ocp_vpc.id
-  service_name = data.aws_vpc_endpoint_service.s3.service_name
-
-  policy = < $pullDir/newPullSecret.json
-
-OCP_RELEASE=$(oc version --client | cut -d- -f3)
-
-echo oc adm -a newPullSecret.json release mirror --from=quay.io/openshift-release-dev/ocp-release:${OCP_RELEASE} \
-  --to=${repoHost}/${repoName} --to-release-image=${repoHost}/${repoName}:${OCP_RELEASE}
diff --git a/6_bootstrap/rhcos.tf b/6_bootstrap/rhcos.tf
deleted file mode 100644
index 156c3ec..0000000
--- a/6_bootstrap/rhcos.tf
+++ /dev/null
@@ -1,11 +0,0 @@
-data "aws_ami" "rhcos" {
-  most_recent = true
-
-  owners = ["531415883065"]
-
-  filter {
-    name   = "image-id"
-    values = ["${var.ami}"]
-  }
-
-}
diff --git a/6_bootstrap/route53.tf b/6_bootstrap/route53.tf
deleted file mode 100644
index 38cb911..0000000
--- a/6_bootstrap/route53.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-data "aws_route53_zone" "ocp_private" {
-  zone_id = var.ocp_route53_private_zone_id
-}
-
diff --git a/6_bootstrap/s3.tf b/6_bootstrap/s3.tf
deleted file mode 100644
index 5aacd22..0000000
--- a/6_bootstrap/s3.tf
+++ /dev/null
@@ -1,26 +0,0 @@
-
-resource "random_id" "bucketid" {
-  byte_length = "8"
-}
-
-resource "aws_s3_bucket" "ocp_ignition" {
-  bucket = "${local.infrastructure_id}-infra-${random_id.bucketid.hex}"
-  acl    = "private"
-
-  tags = merge(
-    var.default_tags,
-    map(
-      "Name", "${local.infrastructure_id}-infra-${random_id.bucketid.hex}"
-    )
-  )
-}
-
-resource "aws_s3_bucket_object" "bootstrap_ign" {
-  depends_on = [
-    null_resource.generate_ignition_config
-  ]
-
-  bucket  = aws_s3_bucket.ocp_ignition.id
-  key     = "bootstrap.ign"
-  content = data.local_file.bootstrap_ign.content
-}
diff --git a/6_bootstrap/security_group.tf b/6_bootstrap/security_group.tf
deleted file mode 100644
index 4bfcf4e..0000000
--- a/6_bootstrap/security_group.tf
+++ /dev/null
@@ -1,48 +0,0 @@
-data "aws_security_group" "master" {
-  id = var.ocp_control_plane_security_group_id
-}
-
-data "aws_security_group" "worker" {
-  id = var.ocp_worker_security_group_id
-}
-
-resource "aws_security_group" "bootstrap" {
-  name   = "${local.infrastructure_id}-bootstrap"
-  vpc_id = data.aws_vpc.ocp_vpc.id
-
-  tags = merge(
-    var.default_tags,
-    map(
-      "Name", "${local.infrastructure_id}-bootstrap",
-      "kubernetes.io/cluster/${local.infrastructure_id}", "shared"
-    )
-  )
-}
-
-# TODO do we need SSH?
-resource "aws_security_group_rule" "bootstrap_ssh" {
-  type = "ingress"
-
-  from_port = 22
-  to_port   = 22
-  protocol  = "tcp"
-  cidr_blocks = [
-    "0.0.0.0/0"
-  ]
-
-  security_group_id = aws_security_group.bootstrap.id
-}
-
-# TODO huh?
-resource "aws_security_group_rule" "bootstrap_19531" {
-  type = "ingress"
-
-  from_port = 19531
-  to_port   = 19531
-  protocol  = "tcp"
-  cidr_blocks = [
-    "0.0.0.0/0"
-  ]
-
-  security_group_id = aws_security_group.bootstrap.id
-}
diff --git a/6_bootstrap/variables.tf b/6_bootstrap/variables.tf
deleted file mode 100644
index 69edce2..0000000
--- a/6_bootstrap/variables.tf
+++ /dev/null
@@ -1,122 +0,0 @@
-####### AWS Access and Region Details #############################
-variable "aws_region" {
-  default     = "us-east-2"
-  description = "One of us-east-2, us-east-1, us-west-1, us-west-2, ap-south-1, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, us-west-2, eu-central-1, eu-west-1, eu-west-2, sa-east-1"
-}
-
-variable "aws_azs" {
-  type        = list(string)
-  description = "The availability zone letter appendix you want to deploy to in the selected region "
-  default     = ["a", "b", "c"]
-}
-
-variable "default_tags" {
-  default = {}
-}
-
-####### AWS Deployment Details ####################################
-# SSH Key
-variable "ami" { default = "" }
-
-variable "aws_access_key_id" {
-  default = ""
-}
-
-variable "aws_secret_access_key" {
-  default = ""
-}
-
-variable "infrastructure_id" {
-  default = ""
-}
-
-variable "clustername" { default = "ocp4" }
-
-variable "private_vpc_id" { default = "" }
-
-# Subnet Details
-variable "private_vpc_private_subnet_ids" {
-  description = "List of subnet ids"
-  type        = list(string)
-  default     = []
-}
-
-variable "domain" {
-  default = "example.com"
-}
-
-variable "cluster_network_cidr" { default = "192.168.0.0/17" }
-variable "cluster_network_host_prefix" { default = "23" }
-variable "service_network_cidr" { default = "192.168.128.0/24" }
-
-variable "bootstrap" {
-  default = {
-    type = "i3.xlarge"
-  }
-}
-variable "control_plane" {
-  default = {
-    count = "3"
-    type  = "m4.xlarge"
-    disk  = "120"
-  }
-}
-
-variable "worker" {
-  default = {
-    count = "3"
-    type  = "m4.large"
-    disk  = "120"
-  }
-}
-
-variable "openshift_pull_secret" {
-  default = "./openshift_pull_secret.json"
-}
-
-variable "use_worker_machinesets" {
-  default = true
-}
-
-variable "openshift_installer_url" {
-  default = "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest"
-}
-
-variable "ocp_control_plane_security_group_id" {
-  default = ""
-}
-
-variable "ocp_worker_security_group_id" {
-  default = ""
-}
-
-variable "ocp_master_instance_profile_id" {
-  default = ""
-}
-
-variable "ocp_worker_instance_profile_id" {
-  default = ""
-}
-
-variable "ocp_control_plane_lb_int_arn" {
-  default = ""
-}
-variable "ocp_control_plane_lb_int_22623_tg_arn" {
-  default = ""
-}
-
-variable "ocp_control_plane_lb_int_6443_tg_arn" {
-  default = ""
-}
-
-variable "ocp_route53_private_zone_id" {
-  default = ""
-}
-
-variable "airgapped" {
-  default = false
-}
-
-variable repository {
-  default = ""
-}
diff --git a/6_bootstrap/vpc.tf b/6_bootstrap/vpc.tf
deleted file mode 100644
index 47768cb..0000000
--- a/6_bootstrap/vpc.tf
+++ /dev/null
@@ -1,8 +0,0 @@
-data "aws_vpc" "ocp_vpc" {
-  id = var.private_vpc_id
-}
-
-data "aws_subnet" "ocp_pri_subnet" {
-  count = length(var.private_vpc_private_subnet_ids)
-  id    = "${element(var.private_vpc_private_subnet_ids, count.index)}"
-}
diff --git a/7_control_plane/control_plane.tf b/7_control_plane/control_plane.tf
deleted file mode 100644
index 1a7e836..0000000
--- a/7_control_plane/control_plane.tf
+++ /dev/null
@@ -1,32 +0,0 @@
-resource "aws_instance" "master" {
-  # if master nodes are already created, don't trigger a destroy/recreate if we don't
-  # have to, which are triggered on user_data changes
-  count = lookup(var.control_plane, "count", 3)
-
-  ami                  = data.aws_ami.rhcos.id
-  instance_type        = lookup(var.control_plane, "type", "m4.xlarge")
-  subnet_id            = element(data.aws_subnet.ocp_pri_subnet.*.id, count.index)
-  iam_instance_profile = data.aws_iam_instance_profile.ocp_ec2_master_instance_profile.name
-
-  vpc_security_group_ids = [
-    data.aws_security_group.master.id,
-  ]
-
-  root_block_device {
-    volume_size = lookup(var.control_plane, "disk", 120)
-  }
-
-  associate_public_ip_address = false
-  availability_zone           = element(data.aws_availability_zone.aws_azs.*.name, count.index)
-
-  tags = merge(
-    var.default_tags,
-    map(
-      "Name", format("${local.infrastructure_id}-master%02d", count.index + 1),
-      "kubernetes.io/cluster/${local.infrastructure_id}", "shared"
-    )
-  )
-  user_data = <createRS.json
-{
-  "Comment": "Creating Alias resource record sets in Route 53",
-  "Changes": [{
-    "Action": "CREATE",
-    "ResourceRecordSet": {
-      "Name": "*.apps.${clustername}.${domain}",
-      "Type": "A",
-      "AliasTarget":{
-        "HostedZoneId": "$lbzone",
-        "DNSName": "$lbhost",
-        "EvaluateTargetHealth": false
-      }}
-  }]
-}
-EOF
-
-if [ $airgapped -ne "true" ]; then
-  aws route53 change-resource-record-sets --hosted-zone-id $rte53zone --change-batch file://createRS.json
-fi
-
-rm createRS.json
-
-exit 0
diff --git a/8_postinstall/variables.tf b/8_postinstall/variables.tf
deleted file mode 100644
index 5b44298..0000000
--- a/8_postinstall/variables.tf
+++ /dev/null
@@ -1,29 +0,0 @@
-####### AWS Access and Region Details #############################
-variable "aws_region" {
-  default     = "us-east-2"
-  description = "One of us-east-2, us-east-1, us-west-1, us-west-2, ap-south-1, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, us-west-2, eu-central-1, eu-west-1, eu-west-2, sa-east-1"
-}
-
-variable "aws_azs" {
-  type        = list(string)
-  description = "The availability zone letter appendix you want to deploy to in the selected region "
-  default     = ["a", "b", "c"]
-}
-
-variable "default_tags" {
-  default = {}
-}
-
-variable "infrastructure_id" {
-  default = ""
-}
-
-variable "clustername" { default = "ocp4" }
-
-variable "domain" {
-  default = "example.com"
-}
-
-variable "airgapped" {
-  default = false
-}
diff --git a/8_postinstall/wait.tf b/8_postinstall/wait.tf
deleted file mode 100644
index b6f9544..0000000
--- a/8_postinstall/wait.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-resource "null_resource" "postinstall" {
-
-  provisioner "local-exec" {
-    when    = create
-    command = "${path.module}/postinstall.sh ${var.infrastructure_id} ${var.clustername} ${var.domain} ${var.airgapped}"
-  }
-}
diff --git a/8_postinstall/worker_config.tftemplate b/8_postinstall/worker_config.tftemplate
deleted file mode 100644
index f85300e..0000000
--- a/8_postinstall/worker_config.tftemplate
+++ /dev/null
@@ -1,44 +0,0 @@
-data "aws_ami" "rhcos" {
-  most_recent = true
-  owners      = ["531415883065"]
-}
-
-resource "aws_instance" "worker_machines" {
-  count = 3
-
-  ami           = data.aws_ami.rhcos.id
-  instance_type = "m4.large"
-}
-
-resource "aws_elb" "ocp_compute_elb" {
-  listener {
-    instance_port     = 80
-    instance_protocol = "http"
-    lb_port           = 80
-    lb_protocol       = "http"
-  }
-}
-
-resource "aws_security_group" "compute_elb" {
-}
-
-#data "aws_route53_zone" "ocp_private" {
-#  name = "${var.clustername}.${var.domain}"
-#}
-
-#data "aws_route53_zone" "ocp_public" {
-#  name = var.domain
-#}
-
-resource "aws_route53_record" "compute_apps" {
-  name    = "*.apps.${var.clustername}.${var.domain}"
-  type    = "A"
-  zone_id = "aaa"
-}
-
-resource "aws_route53_record" "compute_apps_public" {
-  count   = var.airgapped ? 0 : 1
-  name    = "*.apps.${var.clustername}.${var.domain}"
-  type    = "A"
-  zone_id = "aaa"
-}
diff --git a/8_postinstall/worker_import.sh b/8_postinstall/worker_import.sh
deleted file mode 100644
index 3b1052b..0000000
--- a/8_postinstall/worker_import.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin.bash
-# Invoked woth the implementation ID
-# Collect AWS ids and insert into terraform
-clusterid=$1 # "ocp42ss-gse01"
-clustername=$2 #"ocp42ss"
-domain=$3 # "vbudi.cf"
-
-if [ -z $domain ]; then
-  echo "Arguments are clusterID clusterName Domain airgapped"
-  exit 999
-fi
-
-MY_PATH=$(dirname "$0") # relative
-cp $MY_PATH/worker_config.tftemplate $MY_PATH/worker_config.tf
-
-instList=$(aws ec2 describe-instances --filters "Name=instance.group-name,Values=${clusterid}-worker" --query "Reservations[*].Instances[*].{Instance:InstanceId}" | grep Instance | cut -d\" -f4)
-i=0
-echo $instList
-for instName in $instList
-do
-  terraform import "module.postinstall.aws_instance.workermachines[$i]" $instName
-  i=$((i+1))
-done
-
-lb_list=$(aws elb describe-load-balancers | jq '."LoadBalancerDescriptions" | .[]."LoadBalancerName"')
-for lbname in $lb_list; do
-  lbname=$(echo $lbname | tr -d '"')
-  jqargs=".\"TagDescriptions\" | .[].Tags | .[] | select(.Key == \"kubernetes.io/cluster/${clusterid}\") | .Value"
-  klval=$(aws elb describe-tags --load-balancer-names ${lbname} | jq "$jqargs" )
-  if [ $klval = '"owned"' ]; then
-    found=1
-    created_lb=$lbname
-  fi
-done
-terraform import module.postinstall.aws_elb.ocp_compute_elb ${created_lb}
-
-sg_elb=$(aws ec2 describe-security-groups --query "SecurityGroups[*].GroupName" | grep "k8s-elb" | cut -d\" -f2)
-sg_name=$(aws ec2 describe-security-groups --query "SecurityGroups[*].GroupId" --filter "Name=group-name,Values=$sg_elb" | grep sg | cut -d\" -f2)
-terraform import module.postinstall.aws_security_group.compute_elb ${sg_name}
-
-if [ $airgapped -ne "true" ]; then
-  rte53pubargs=".HostedZones | .[] | select(.Name == \"${domain}.\") | .Id"
-  rte53pubzone=$(aws route53 list-hosted-zones | jq "$rte53pubargs" | tr -d '"')
-  terraform import module.postinstall.aws_route53_record.compute_apps_public[0] ${rte53pubzone}_*.apps.${clustername}.${domain}_A
-fi
-
-rte53priargs=".HostedZones | .[] | select(.Name == \"${clustername}.${domain}.\") | .Id"
-rte53prizone=$(aws route53 list-hosted-zones | jq "$rte53priargs" | tr -d '"')
-terraform import module.postinstall.aws_route53_record.compute_apps ${rte53prizone}_*.apps.${clustername}.${domain}_A
-
-
-# must decouple Security group $sg_name from $clusterid-master and $clusterid-worker
-
-aws ec2 revoke-security-group-ingress --group-name $clusterid-master --source_group $sg_name
-aws ec2 revoke-security-group-ingress --group-name $clusterid-worker --source_group $sg_name
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..6e59d68
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
+# This file just uses aliases defined in OWNERS_ALIASES.
+
+approvers:
+  - aws-approvers
+reviewers:
+  - aws-reviewers
diff --git a/bootstrap/README.md b/bootstrap/README.md
new file mode 100644
index 0000000..b91c4e9
--- /dev/null
+++ b/bootstrap/README.md
@@ -0,0 +1,48 @@
+# Bootstrap Module
+
+This [Terraform][] [module][] manages [AWS][] resources only needed during cluster bootstrapping.
+It uses [implicit provider inheritance][implicit-provider-inheritance] to access the [AWS provider][AWS-provider].
+
+## Example
+
+Set up a `main.tf` with:
+
+```hcl
+provider "aws" {
+  region = "us-east-1"
+}
+
+resource "aws_vpc" "example" {
+  cidr_block           = "10.0.0.0/16"
+  enable_dns_hostnames = true
+  enable_dns_support   = true
+}
+
+resource "aws_subnet" "example" {
+  vpc_id     = "${aws_vpc.example.id}"
+  cidr_block = "${aws_vpc.example.cidr_block}"
+}
+
+module "bootstrap" {
+  source = "github.com/openshift/installer//data/data/aws/bootstrap"
+
+  ami        = "ami-0af8953af3ec06b7c"
+  cluster_id = "my-cluster"
+  ignition   = "{\"ignition\": {\"version\": \"2.2.0\"}}",
+  subnet_id  = "${aws_subnet.example.id}"
+  vpc_id     = "${aws_vpc.example.id}"
+}
+```
+
+Then run:
+
+```console
+$ terraform init
+$ terraform plan
+```
+
+[AWS]: https://aws.amazon.com/
+[AWS-provider]: https://www.terraform.io/docs/providers/aws/
+[implicit-provider-inheritance]: https://www.terraform.io/docs/modules/usage.html#implicit-provider-inheritance
+[module]: https://www.terraform.io/docs/modules/
+[Terraform]: https://www.terraform.io/
diff --git a/bootstrap/main.tf b/bootstrap/main.tf
new file mode 100644
index 0000000..2b5cbfa
--- /dev/null
+++ b/bootstrap/main.tf
@@ -0,0 +1,196 @@
+locals {
+  public_endpoints = var.publish_strategy == "External" ? true : false
+}
+
+resource "aws_s3_bucket" "ignition" {
+  acl = "private"
+
+  tags = merge(
+    {
+      "Name" = "${var.cluster_id}-bootstrap"
+    },
+    var.tags,
+  )
+
+  lifecycle {
+    ignore_changes = all
+  }
+}
+
+resource "aws_s3_bucket_object" "ignition" {
+  bucket  = aws_s3_bucket.ignition.id
+  key     = "bootstrap.ign"
+  content = var.ignition
+  acl     = "private"
+
+  server_side_encryption = "AES256"
+
+  tags = merge(
+    {
+      "Name" = "${var.cluster_id}-bootstrap"
+    },
+    var.tags,
+  )
+
+  lifecycle {
+    ignore_changes = all
+  }
+}
+
+data "ignition_config" "redirect" {
+  replace {
+    source = "s3://${aws_s3_bucket.ignition.id}/bootstrap.ign"
+  }
+}
+
+resource "aws_iam_instance_profile" "bootstrap" {
+  name = "${var.cluster_id}-bootstrap-profile"
+
+  role = aws_iam_role.bootstrap.name
+}
+
+resource "aws_iam_role" "bootstrap" {
+  name = "${var.cluster_id}-bootstrap-role"
+  path = "/"
+
+  assume_role_policy = <