This repository has been archived by the owner on Oct 11, 2021. It is now read-only.

Nested stack #134

Open · wants to merge 13 commits into base: develop
3 changes: 3 additions & 0 deletions .gitignore
@@ -1,2 +1,5 @@
# taskcat
taskcat_outputs/

*.tgz
*.idea
3 changes: 3 additions & 0 deletions .travis.yml
@@ -7,6 +7,9 @@ install:
git:
submodules: false
before_install:
# DELETE ME
- export BASE_IP=0.0.0.0/0

- sed -i 's/git@github.com:/https:\/\/github.com\//' .gitmodules
- git submodule update --init --recursive
- git checkout .gitmodules
35 changes: 35 additions & 0 deletions CHANGELOG.md
@@ -0,0 +1,35 @@
# CHANGELOG

Fork: 31.09.2019
Branch: develop
Contact: [email protected]

## Changes in Branch Develop

### 1. Comments

- Added explanatory comments throughout the templates. Comments are good, we love comments!

### 2. Structure

- More granular `templates` folder, which makes maintenance and debugging easier
* `turbine-resource.template` contains all the Turbine support services
* The security groups have a dedicated template
* CI has its own sub folder
- Templates split into `cluster`, `services`, and `ci` (see the layout sketch below)
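
For orientation, the resulting layout (inferred from the Makefile targets and the files touched in this PR) looks roughly like:

```
templates/
├── turbine-master.template
├── cluster/
├── services/
└── ci/
    └── turbine-codedeploy.template
```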

### 3. Log and Deployment Bucket

- Buckets are now explicitly private by default

Incident: after a DAG run, `delete-stack` ended in DELETE_FAILED because the
Logs and Deployments buckets were not empty.

- Added a custom CloudFormation resource plus Lambda function that empties the
  Deployments bucket on `delete-stack`
- The Logs bucket is retained for error investigation and DAG data archiving,
  as sketched below
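
A minimal sketch of the intended bucket behavior (logical IDs and properties here are illustrative assumptions; the actual definitions live in the cluster template):

```yaml
LogsBucket:
  Type: AWS::S3::Bucket
  DeletionPolicy: Retain   # keep logs for investigation/archiving after delete-stack
  Properties:
    AccessControl: Private

DeploymentsBucket:
  Type: AWS::S3::Bucket    # emptied by the cleanup custom resource before deletion
  Properties:
    AccessControl: Private
```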

43 changes: 42 additions & 1 deletion Makefile
@@ -1,3 +1,15 @@
define message1
Environment variable BASE_IP is required but not set.
Set it, for example, with:
"$$ export BASE_IP=$$(curl -s ipinfo.io/ip)/32"

endef

ifndef BASE_IP
export message1
$(error $(message1))
endif

ifndef BRANCH
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
endif
@@ -8,12 +20,41 @@ else
BUCKET := s3://turbine-quickstart/quickstart-turbine-airflow-$(BRANCH)
endif

# turbine-master
CURRENT_LOCAL_IP = $(BASE_IP)
# DELETE ME
AWS_REGION := eu-central-1
PROJECT_NAME := eksairflow01-staging

lint:
cfn-lint templates/*.template
cfn-lint templates/cluster/*.template
cfn-lint templates/services/*.template

test:
taskcat -c ./ci/taskcat.yaml

sync:
aws s3 sync --exclude '.*' --acl public-read . $(BUCKET)

# DELETE ME
artifacts:
aws s3 cp --recursive submodules/quickstart-aws-vpc s3://${PROJECT_NAME}-${AWS_REGION}/${PROJECT_NAME}submodules/quickstart-aws-vpc/templates/
aws s3 cp --recursive templates/cluster s3://${PROJECT_NAME}-${AWS_REGION}/${PROJECT_NAME}templates
aws s3 cp --recursive templates/services s3://${PROJECT_NAME}-${AWS_REGION}/${PROJECT_NAME}templates
aws s3 cp --recursive templates/ci s3://${PROJECT_NAME}-${AWS_REGION}/${PROJECT_NAME}templates

# DELETE ME
cluster:
	aws cloudformation create-stack --region ${AWS_REGION} --stack-name ${PROJECT_NAME} \
--template-body file://templates/turbine-master.template \
--parameters \
ParameterKey="AllowedWebBlock",ParameterValue="${CURRENT_LOCAL_IP}" \
ParameterKey="DbMasterPassword",ParameterValue="super_secret" \
ParameterKey="QSS3BucketName",ParameterValue="${PROJECT_NAME}-${AWS_REGION}" \
ParameterKey="QSS3KeyPrefix",ParameterValue="${PROJECT_NAME}" \
--capabilities CAPABILITY_NAMED_IAM

# DELETE ME
clean:
	aws cloudformation delete-stack --region ${AWS_REGION} --stack-name ${PROJECT_NAME}
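
A plausible end-to-end invocation of these development targets (target names and the `BASE_IP` guard come from this Makefile; the ordering is an assumption):

```sh
export BASE_IP="$(curl -s ipinfo.io/ip)/32"   # satisfy the Makefile guard
make lint        # cfn-lint all template folders
make artifacts   # upload templates to the project bucket
make cluster     # create the CloudFormation stack
make clean       # tear the stack down again
```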

16 changes: 12 additions & 4 deletions examples/project/Makefile
@@ -1,5 +1,12 @@
ifndef stack-name
$(error stack-name is not set)
define message1
Environment variable stack_name is required but not set.
Set it with:
"$$ export stack_name=<your stack>"

endef

ifndef stack_name
$(error $(message1))
endif
ifndef revision
revision := $(shell date --utc +%Y%m%dT%H%M%SZ)
@@ -8,16 +15,17 @@ endif

define getRef
$(shell aws cloudformation describe-stacks \
--stack-name $(stack-name) \
--stack-name $(stack_name) \
--query "Stacks[0].Outputs[?OutputKey=='$(1)'].OutputValue" \
--output text)
endef

APPLICATION := $(call getRef,CodeDeployApplication)
DEPLOYMENT_GROUP := $(call getRef,CodeDeployDeploymentGroup)
DEPLOYMENTS_BUCKET := $(call getRef,DeploymentsBucket)


PACKAGE := $(stack-name)_$(revision).tgz
PACKAGE := $(stack_name)_$(revision).tgz


package:
189 changes: 189 additions & 0 deletions templates/ci/turbine-codedeploy.template
@@ -0,0 +1,189 @@
AWSTemplateFormatVersion: "2010-09-09"

Description: >-
  CI CodeDeploy resources for the Airflow build.

  NOTE: The DeploymentsBucket is defined in turbine-cluster.template because
  the cluster instance configuration depends on it; this avoids a circular
  dependency between CIStack and AirflowStack.
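
# For context, the parent template is expected to wire the bucket in one
# direction only; a hypothetical snippet from turbine-master.template:
#
#   CIStack:
#     Type: AWS::CloudFormation::Stack
#     Properties:
#       Parameters:
#         DeploymentsBucket: !GetAtt AirflowStack.Outputs.DeploymentsBucket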

Parameters:
SchedulerScalingGroup:
Type: String
WebserverScalingGroup:
Type: String
WorkerSetScalingGroup:
Type: String
DeploymentsBucket:
Type: String

Resources:

CodeDeployApplication:
Type: AWS::CodeDeploy::Application
Properties:
ApplicationName: !Sub ${AWS::StackName}-deployment-application
ComputePlatform: Server

CodeDeployDeploymentGroup:
Type: AWS::CodeDeploy::DeploymentGroup
Properties:
ApplicationName: !Ref CodeDeployApplication
DeploymentGroupName: !Sub ${AWS::StackName}-deployment-group
AutoScalingGroups:
- !Ref SchedulerScalingGroup
- !Ref WebserverScalingGroup
- !Ref WorkerSetScalingGroup
ServiceRoleArn: !GetAtt
- CodeDeployServiceRole
- Arn

CodeDeployServiceRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: 2012-10-17
Statement:
- Effect: Allow
Principal:
Service:
- codedeploy.amazonaws.com
Action:
- sts:AssumeRole
ManagedPolicyArns:
- 'arn:aws:iam::aws:policy/service-role/AWSCodeDeployRole'

  # Custom resource that invokes the cleanup Lambda on stack Create, Update, and Delete
CleanUpDeployments:
DependsOn: cleanupDeployBucket
Type: Custom::cleanupdeploy
Properties:
ServiceToken:
Fn::GetAtt:
- "cleanupDeployBucket"
- "Arn"
BucketName: !Ref DeploymentsBucket

# Removes all objects from Deployments Bucket upon cfn delete-stack
cleanupDeployBucket:
Type: "AWS::Lambda::Function"
Properties:
Code:
ZipFile: !Sub |
import boto3
import json
import logging
          # module cfnresponse is not importable for inline python3.7 code, use requests instead
          # import cfnresponse
          # This yields a deprecation warning, but is currently the only option for
          # python3.7, since inline code cannot import third-party packages
from botocore.vendored import requests
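          # NOTE: botocore.vendored.requests is deprecated and removed from newer
          # botocore releases; packaging the real 'requests' library would be the
          # durable fix (kept here to match the inline-code constraint above)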
from botocore.exceptions import ClientError

logger = logging.getLogger(__name__)

def setup(level='DEBUG', boto_level=None, **kwargs):
logging.root.setLevel(level)

if not boto_level:
boto_level = level

logging.getLogger('boto').setLevel(boto_level)
logging.getLogger('boto3').setLevel(boto_level)
logging.getLogger('botocore').setLevel(boto_level)
logging.getLogger('urllib3').setLevel(boto_level)

try:
setup('DEBUG', formatter_cls=None, boto_level='ERROR')
except Exception as e:
logger.error(e, exc_info=True)

          def clean_up_bucket(target_bucket):
              logger.info(f"Cleaning contents of bucket {target_bucket}.")
              s3_resource = boto3.resource('s3')
              try:
                  # head_bucket raises ClientError if the bucket is missing or forbidden
                  s3_resource.meta.client.head_bucket(Bucket=target_bucket)
              except ClientError as e:
                  logger.info(f"s3://{target_bucket} not found. {e}")
                  return
              bucket_obj = s3_resource.Bucket(target_bucket)
              bucket_obj.objects.all().delete()

def handler(event, context):
# helper(event, context)

response_data = {}
# NOTE: The status value sent by the custom resource provider must be either SUCCESS or FAILED!!
try:
bucket = event['ResourceProperties']['BucketName']
if event['RequestType'] == 'Delete':
clean_up_bucket(bucket)
if event['RequestType'] == 'Update':
logger.info(f"custom::cleanupbucket update. Target bucket: {bucket}")
if event['RequestType'] == 'Create':
logger.info(f"custom::cleanupbucket create. Target bucket: {bucket}")
send_response_cfn(event, context, "SUCCESS")
except Exception as e:
logger.info(str(e))
send_response_cfn(event, context, "FAILED")

def send_response_cfn(event, context, response_status):
response_body = {'Status': response_status,
'Reason': 'Log stream name: ' + context.log_stream_name,
'PhysicalResourceId': context.log_stream_name,
'StackId': event['StackId'],
'RequestId': event['RequestId'],
'LogicalResourceId': event['LogicalResourceId'],
'Data': json.loads("{}")}
# Sends the response signal to the respective custom resource request
requests.put(event['ResponseURL'], data=json.dumps(response_body))
      Description: Cleans up the Deployments bucket on stack delete.
Handler: index.handler
Role: !GetAtt CleanupS3ExecutionRole.Arn
Runtime: python3.7
Timeout: 100

CleanupS3ExecutionRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Principal:
Service:
- lambda.amazonaws.com
Action:
- sts:AssumeRole
Path: "/"

CleanupS3ExecutionPolicy:
DependsOn:
- CleanupS3ExecutionRole
Type: AWS::IAM::Policy
Properties:
PolicyName: DeleteS3BucketLogsRolePolicy
Roles:
- Ref: CleanupS3ExecutionRole
PolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Action:
- logs:*
Resource:
- arn:aws:logs:*:*:*
- Effect: Allow
Action:
- s3:*
Resource:
- "*"

Outputs:
DeploymentsBucket:
Value: !Ref DeploymentsBucket
CodeDeployApplication:
Value: !Ref CodeDeployApplication
CodeDeployDeploymentGroup:
Value: !Ref CodeDeployDeploymentGroup