diff --git a/delocp.sh b/delocp.sh
deleted file mode 100755
index 6134c70..0000000
--- a/delocp.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-clusterId=$1
-
-if [ -z $clusterId ]; then
-  exit 99
-fi
-
-terraform destroy -auto-approve &
-
-sleep 10
-workers=$(aws ec2 describe-instances --filters Name="tag:kubernetes.io/cluster/${clusterId}",Values="owned" --query 'Reservations[].Instances[].[InstanceId, Tags[?Key==`Name`] | [0].Value]' --output text | grep worker | cut -d$'\t' -f1)
-
-aws ec2 terminate-instances --instance-ids ${workers}
-
-vpcid=$(grep vpc terraform.tfstate | grep vpc_id | grep vpc- | head -1 | cut -d"\"" -f4)
-elbname=$(aws elb describe-load-balancers --query 'LoadBalancerDescriptions[].[LoadBalancerName,VPCId]' --output text | cut -d$'\t' -f1)
-aws elb delete-load-balancer --load-balancer-name ${elbname}
-
-sleep 300
-
-sg=$(aws ec2 describe-security-groups --filters Name="tag:kubernetes.io/cluster/${clusterId}",Values="owned" --query 'SecurityGroups[].[GroupId,GroupName]' --output text | grep "k8s-elb" | cut -d$'\t' -f1)
-
-aws ec2 delete-security-group --group-id ${sg}
-
-sleep 60
-
-aws s3 ls | grep ${clusterId} | awk '{print "aws s3 rb —force s3://"$3}' | bash
-
-aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId}
-
-aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId} | awk '{print "aws iam delete-user-policy --user-name "$1" --policy-name "$1"-policy"}' | bash
-
-aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId} | awk '{print "aws iam delete-access-key --user-name "$1" --access-key-id $(aws iam list-access-keys --user-name "$1" --query 'AccessKeyMetadata[].AccessKeyId' --output text)"}' | bash
-
-aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId} | awk '{print "aws iam delete-user --user-name "$1}' | bash
-
-exit 0
diff --git a/install/aws_cleanup.sh b/install/aws_cleanup.sh
new file mode 100755
index 0000000..1ec2e02
--- /dev/null
+++ b/install/aws_cleanup.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+path=$(dirname $0)
+clusterId=$(cat $path/infraID)
+
+if [ -z "$clusterId" ]; then
+  exit 99
+fi
+
+if [ -z "$AWS_ACCESS_KEY_ID" ]; then
+  exit 80
+fi
+if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
+  exit 80
+fi
+
+echo "0 - Start processing for cluster $clusterId - waiting for masters to be destroyed"
+masters=3
+while [ $masters -gt 0 ]; do
+  nodes=$(aws ec2 describe-instances --filters Name="tag:kubernetes.io/cluster/${clusterId}",Values="owned" Name="instance-state-name",Values="running" --query 'Reservations[].Instances[].[InstanceId, Tags[?Key==`Name`] | [0].Value]' --output text)
+  masters=$(echo "$nodes" | grep master | wc -l)
+  echo "Waiting for masters to be destroyed - $masters remaining"
+  if [ $masters -gt 0 ]; then
+    sleep 10
+  fi
+done
+
+workers=$(echo "$nodes" | cut -d$'\t' -f1)
+
+echo "1 - Deleting workers - $workers -"
+  aws ec2 terminate-instances --instance-ids ${workers}
+
+vpcid=$(aws ec2 describe-vpcs --filters Name="tag:kubernetes.io/cluster/${clusterId}",Values="owned" --query 'Vpcs[].VpcId' --output text)
+elbname=$(aws elb describe-load-balancers --query 'LoadBalancerDescriptions[].[LoadBalancerName,VPCId]' --output text | grep $vpcid | cut -d$'\t' -f1)
+echo "2 - Deleting apps load balancers - $elbname - "
+  aws elb delete-load-balancer --load-balancer-name ${elbname}
+
+sleep 30
+
+sg=$(aws ec2 describe-security-groups --filters Name="tag:kubernetes.io/cluster/${clusterId}",Values="owned" --query 'SecurityGroups[].[GroupId,GroupName]' --output text | grep "k8s-elb" | cut -d$'\t' -f1)
+echo "3 - Deleting elb security group - $sg -"
+
+  aws ec2 delete-security-group --group-id ${sg}
+sleep 10
+
+s3imagereg=$(aws s3 ls | grep ${clusterId} | awk '{print $3}')
+echo "4 - Deleting S3 image-registry $s3imagereg -"
+  aws s3 rb --force s3://$s3imagereg
+
+iamusers=$(aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId})
+echo "5 - Deleting iamusers - $iamusers"
+
+  echo "$iamusers" | awk '{print "aws iam delete-user-policy --user-name "$1" --policy-name "$1"-policy"}' | bash
+  echo "$iamusers" | awk '{print "aws iam delete-access-key --user-name "$1" --access-key-id $(aws iam list-access-keys --user-name "$1" --query 'AccessKeyMetadata[].AccessKeyId' --output text)"}' | bash
+  echo "$iamusers" | awk '{print "aws iam delete-user --user-name "$1}' | bash
+
+exit 0
diff --git a/install/installer.tf b/install/installer.tf
index 1037bba..897428e 100644
--- a/install/installer.tf
+++ b/install/installer.tf
@@ -80,7 +80,17 @@ baseDomain: ${var.domain}
 compute:
 - hyperthreading: Enabled
   name: worker
-  replicas: 1
+  replicas: 3
+  platform:
+    aws:
+      rootVolume:
+        iops: ${var.aws_worker_root_volume_iops}
+        size: ${var.aws_worker_root_volume_size}
+        type: ${var.aws_worker_root_volume_type}
+      type: ${var.aws_worker_instance_type}
+      zones:
+      %{ for zone in var.aws_worker_availability_zones}
+      - ${zone}%{ endfor }
 controlPlane:
   hyperthreading: Enabled
   name: master
@@ -161,277 +171,16 @@ resource "null_resource" "manifest_cleanup_control_plane_machineset" {
   }
 }
 
-# rewrite the domains and the infrastructure id we use in the cluster
-resource "local_file" "cluster_infrastructure_config" {
-  depends_on = [
-    null_resource.generate_manifests
-  ]
-  file_permission = "0644"
-  filename = "${path.module}/temp/manifests/cluster-infrastructure-02-config.yml"
-
-  content = <
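For reference, a minimal sketch of how the new install/aws_cleanup.sh is meant to be driven. It assumes the cluster teardown has already been started (the script only waits for the running masters to disappear), that an infraID file sits next to the script, and that AWS credentials are exported; the credential values and the infrastructure ID below are placeholders, not values taken from this change.

export AWS_ACCESS_KEY_ID="<access-key-id>"       # placeholder credentials
export AWS_SECRET_ACCESS_KEY="<secret-key>"      # placeholder credentials
echo "mycluster-x7k2p" > install/infraID         # placeholder infraID (presumably written by the installer)
terraform destroy -auto-approve &                # begin the teardown; the cleanup script waits on it
./install/aws_cleanup.sh
echo "cleanup exited with $?"                    # 0 = success, 80 = missing credentials, 99 = missing infraID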
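Step 5 of the cleanup script builds shell commands with awk and pipes them to bash. Purely as a readability sketch, an equivalent while-read formulation under the same assumption (one UserName/UserId pair per line in $iamusers) could look like the following; the behavior is intended to be identical, including removing each user's policy, access keys, and finally the user itself.

# Sketch only: loop equivalent of the awk | bash pipelines in step 5.
echo "$iamusers" | while read -r username userid; do
  aws iam delete-user-policy --user-name "$username" --policy-name "${username}-policy"
  for key in $(aws iam list-access-keys --user-name "$username" \
                 --query 'AccessKeyMetadata[].AccessKeyId' --output text); do
    aws iam delete-access-key --user-name "$username" --access-key-id "$key"
  done
  aws iam delete-user --user-name "$username"
done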
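The new platform block in install/installer.tf interpolates five var.aws_worker_* values whose declarations are outside this hunk. A plausible sketch of those declarations follows; the variable names come from the diff, while the types and defaults are illustrative assumptions only.

# Assumed declarations for the variables referenced by the new worker block.
# Names are taken from the diff; types and defaults are illustrative.
variable "aws_worker_instance_type" {
  type    = string
  default = "m5.xlarge"
}

variable "aws_worker_root_volume_iops" {
  type    = number
  default = 0 # only meaningful for provisioned-IOPS volume types
}

variable "aws_worker_root_volume_size" {
  type    = number
  default = 120 # GiB
}

variable "aws_worker_root_volume_type" {
  type    = string
  default = "gp2"
}

variable "aws_worker_availability_zones" {
  type    = list(string)
  default = ["us-east-1a", "us-east-1b", "us-east-1c"]
}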