-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathupdate-stack
executable file
·259 lines (215 loc) · 7.65 KB
/
update-stack
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
#!/bin/bash
# update-stack: build and deploy a Datasette instance to AWS Lambda via
# CloudFormation. Usage: update-stack STACK_NAME [options] DB [DB...]
set -euo pipefail
# Trace every command; a deploy script benefits from a full audit trail.
set -x
# EXTERNAL=1 serves the SQLite DBs from S3 instead of embedding them in the
# Lambda zip. Defaults to 0; parse_opts/main may force it to 1 on size limits.
EXTERNAL=${EXTERNAL:-0}
# Scratch file capturing aws-cli output so errors can be sniffed (see
# create_or_update_stack); removed by the EXIT trap.
logfile=$(mktemp)
# Absolute path of the deployment zip we build (readlink -m works even though
# the file does not exist yet).
lambdazip=$(readlink -m package.zip)
# Globals populated by parse_opts:
db_paths=()          # local paths of the SQLite DBs to deploy
dbfiles=             # '@'-separated DB basenames (CloudFormation parameter)
cors=False           # CloudFormation CORS parameter (True/False strings)
zone_id=             # Route 53 hosted zone id for --domain
domain=              # custom domain, if any
validation_domain=   # domain used for ACM certificate validation
prefix=              # URL prefix, slash-trimmed
# Per https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-limits.html,
# the total unzipped size of the deployment package, including layers, must be
# less than 250 MB. Datasette and its deps are about 30 MB, so that leaves 220 MB
# for embedded SQLite DBs.
deployment_size_limit=220000000
# Per the same doc, the zip file we upload needs to be less than 50 MB.
zip_size_limit=50000000
# Per the same doc, /tmp has a 512 MB limit.
tmp_size_limit=512000000
# Remove the scratch logfile on every exit path (success, error, or signal).
finish() {
  rm -f -- "${logfile}"
}
trap finish EXIT
# Check whether a CloudFormation stack with the given name already exists.
# Returns 0 when describe-stacks finds it, non-zero otherwise; all aws-cli
# output (including the "stack does not exist" error) is discarded.
stack_exists() {
  local name
  name=${1:?must provide stack name}
  aws cloudformation describe-stacks --stack-name "${name}" > /dev/null 2>&1
}
# Check whether the optional s3patch helper is installed (used by s3cp for
# delta uploads). `command -v` is the POSIX builtin replacement for the
# deprecated external `which`.
has_s3patch() {
  command -v s3patch > /dev/null
}
# Copy a local file to S3, preferring s3patch (block-level delta upload)
# when it is installed, falling back to plain `aws s3 cp`.
# $1 - local source path, e.g. file.zip
# $2 - remote destination, e.g. s3://bucket/key
s3cp() {
  # Declare locals: the original leaked `from`/`to` into the global scope.
  local from to
  from=${1:?must specify local file, e.g. file.zip}
  to=${2:?must specify remote file, e.g. s3://bucket/key}
  if has_s3patch; then
    s3patch -v cp "${from}" "${to}"
  else
    aws s3 cp "${from}" "${to}"
  fi
}
# Find the Route 53 hosted zone serving the given domain.
# Naive walk up the DNS hierarchy: query for a zone matching the domain
# exactly; if none matches, lop off the left-most label and recurse.
# On success prints "<zone-id> <zone-domain>" (id without the /hostedzone/
# prefix, domain without the trailing dot) and returns 0; returns 1 when no
# containing zone exists.
# $1 - domain, e.g. datasette-demo.code402.com
get_hosted_zone() {
  local domain id zone_domain output
  domain=${1:?must provide domain, e.g. datasette-demo.code402.com}
  output=$(aws route53 list-hosted-zones-by-name --dns-name "${domain}" --max-items 1 --query "HostedZones[0].[Id, Name]" --output text)
  IFS=$'\t' read -r id zone_domain <<< "${output}"
  # Route 53 zone names carry a trailing dot; compare against that form.
  if [ "${zone_domain}" == "${domain}." ]; then
    echo "${id#/hostedzone/} ${zone_domain%.}"
    return 0
  fi
  # No exact match: drop the left-most label and try the parent domain.
  IFS=. read -r _ domain <<< "${domain}"
  if [ "${domain}" == "" ]; then
    return 1
  fi
  get_hosted_zone "${domain}"
}
# Create the named CloudFormation stack if it does not exist, otherwise
# update it; in both cases block until the operation completes. An update
# that changes nothing is treated as success rather than an error.
# $1 - stack name; remaining arguments are passed straight to
#      `aws cloudformation create-stack` / `update-stack`.
create_or_update_stack() {
  local stack_name
  local rv
  stack_name=${1:?must specify stack name}
  shift
  if ! stack_exists "${stack_name}"; then
    aws cloudformation create-stack --stack-name="${stack_name}" "$@"
    aws cloudformation wait stack-create-complete --stack-name="${stack_name}"
  else
    # |& captures stderr too; with `set -o pipefail` the pipeline's status is
    # update-stack's status even though tee succeeds. The if-guard keeps
    # `set -e` from aborting so we can inspect the failure below.
    if aws cloudformation update-stack --stack-name="${stack_name}" "$@" |& tee "${logfile}"; then
      aws cloudformation wait stack-update-complete --stack-name="${stack_name}"
    else
      # $? here is the pipeline's (i.e. update-stack's) exit status.
      rv=$?
      # There must be a less janky way to detect this. When the template hasn't changed,
      # the update-stack command will fail because there are no updates to perform.
      # That's not what we'd like; so we check the output for this string. It'll
      # likely fail to detect this case in non-English locales, though.
      if grep --silent "No updates are to be performed" "${logfile}"; then
        return 0
      fi
      return "${rv}"
    fi
  fi
}
# Rebuild the Lambda deployment zip from the contents of app/.
# Uses a subshell instead of pushd/popd: the cwd is restored on every exit
# path (popd never ran if zip failed under `set -e`) and no directory-stack
# noise is written to stdout. ${lambdazip} is absolute, so cd is safe.
create_lambda_zip() {
  rm -f "${lambdazip}"
  (
    cd app
    zip --quiet -r "${lambdazip}" .
  )
}
# Parse the command line into the script's globals.
# Recognized options:
#   --config VALUE   append VALUE as a line of app/config.txt
#   --cors           enable CORS (sets the CORS stack parameter to True)
#   --domain DOMAIN  serve under DOMAIN; requires a Route 53 hosted zone
#   -m/--metadata F  install F as app/metadata.json (must exist)
#   --prefix P       serve under URL prefix P (slashes trimmed)
# Every other argument is taken as a SQLite database path; at least one is
# required and each must exist.
# Sets: db_paths, dbfiles, cors, domain, zone_id, validation_domain, prefix;
# flips EXTERNAL to 1 when the DBs exceed the in-package size limit; exits
# non-zero on any validation error.
parse_opts() {
  # config_value and db were missing from the original `local` list and
  # leaked into the global scope.
  local db_size=0 size file rv get_hosted_zone_output config_value db
  rm -f app/metadata.json
  rm -f app/config.txt
  while [ "$#" -gt 0 ]; do
    case "$1" in
      --config)
        shift
        config_value=${1:?error: --config requires config value argument}
        echo "${config_value}" >> app/config.txt
        ;;
      --cors)
        cors=True
        ;;
      --domain)
        shift
        domain=${1:?error: --domain requires domain argument}
        # get_hosted_zone may legitimately fail; suspend -e to capture rv.
        set +e
        get_hosted_zone_output=$(get_hosted_zone "${domain}")
        rv=$?
        set -e
        if [ "${rv}" != 0 ]; then
          echo "error: could not find hosted zone for ${domain}" >&2
          exit 1
        fi
        read -r zone_id validation_domain < <(echo "${get_hosted_zone_output}")
        ;;
      -m|--metadata)
        shift
        file=${1:?error: --metadata requires file argument}
        if [ ! -e "${file}" ]; then
          echo "error: --metadata points to non-existent file ${file}" >&2
          exit 2
        fi
        cp "${file}" app/metadata.json
        ;;
      --prefix)
        shift
        prefix=${1:?error: --prefix requires prefix argument}
        # Trim leading/trailing slashes to be robust against how the user
        # enters the value.
        prefix=${prefix#/}
        prefix=${prefix%/}
        ;;
      *)
        db_paths+=("$1")
        ;;
    esac
    shift
  done
  if [ "${#db_paths[@]}" -eq 0 ]; then
    echo "error: must specify at least one database" >&2
    exit 1
  fi
  # All the database files passed must exist.
  for db in "${db_paths[@]}"; do
    if [ ! -e "${db}" ]; then
      echo "error: cannot find database ${db}" >&2
      exit 2
    fi
    # The aws-cli CloudFormation support is weird. If you pass commas,
    # it tries to interpret it as a List<String> and fails.
    # Lazy workaround is to separate using a non-comma.
    dbfiles="${dbfiles}${dbfiles:+@}$(basename "${db}")"
    size=$(stat --printf="%s" "${db}")
    db_size=$((db_size + size))
  done
  if [ "${db_size}" -gt "${tmp_size_limit}" ]; then
    echo "error: Lambda has a 512 MB limit, but total size of DBs is ${db_size}; cannot proceed" >&2
    exit 1
  fi
  if [ "${EXTERNAL}" == "0" ] && [ "${db_size}" -gt "${deployment_size_limit}" ]; then
    echo "info: EXTERNAL=0 but total size of DBs is ${db_size}; falling back to EXTERNAL=1" >&2
    EXTERNAL=1
  fi
}
# Orchestrate the deployment end to end:
#  1. parse options and build the Lambda zip,
#  2. when EXTERNAL=0, embed the DBs in the zip (falling back to EXTERNAL=1
#     if the zip would exceed the 50 MB upload limit),
#  3. create/update the prerequisite stack (S3 bucket) and upload the zip,
#  4. create/update the Lambda/CloudFront stack,
#  5. when EXTERNAL=1, upload the DB files to the bucket,
#  6. point the function at the new code, invalidate the CDN, print outputs.
# $1 - CloudFormation stack name; remaining args go to parse_opts.
main() {
  # Declare locals: the original leaked all of these into the global scope.
  local stack_name db dir size bucket lambda distribution_id
  stack_name=${1:?must provide CloudFormation stack name}
  shift
  parse_opts "$@"
  create_lambda_zip
  if [ "${EXTERNAL}" == "0" ]; then
    # Embed each DB at the root of the zip, by basename.
    for db in "${db_paths[@]}"; do
      dir=$(dirname "${db}")
      pushd "${dir}"
      zip --quiet "${lambdazip}" "$(basename "${db}")"
      popd
    done
    size=$(stat --printf="%s" "${lambdazip}")
    if [ "${size}" -gt "${zip_size_limit}" ]; then
      echo "info: EXTERNAL=0 but total size of zip is ${size}; falling back to EXTERNAL=1" >&2
      EXTERNAL=1
      # Rebuild without the embedded DBs; they will be uploaded to S3 below.
      create_lambda_zip
    fi
  fi
  create_or_update_stack "${stack_name}" --template-body=file://stack-prereq.yaml
  bucket=$(aws cloudformation describe-stack-resources --stack-name "${stack_name}" --logical-resource-id S3Bucket --query StackResources[0].PhysicalResourceId --output text)
  s3cp "${lambdazip}" "s3://${bucket}/package.zip"
  create_or_update_stack "${stack_name}-lambda" \
    --capabilities CAPABILITY_IAM \
    --parameters \
    ParameterKey=Bucket,ParameterValue="${bucket}" \
    ParameterKey=CORS,ParameterValue="${cors}" \
    ParameterKey=DbFiles,ParameterValue="${dbfiles}" \
    ParameterKey=Domain,ParameterValue="${domain}" \
    ParameterKey=HostedZoneId,ParameterValue="${zone_id}" \
    ParameterKey=Prefix,ParameterValue="${prefix}" \
    ParameterKey=ValidationDomain,ParameterValue="${validation_domain}" \
    --template-body=file://stack-lambda.yaml
  if [ "${EXTERNAL}" == "1" ]; then
    for db in "${db_paths[@]}"; do
      s3cp "${db}" "s3://${bucket}/$(basename "${db}")"
    done
  fi
  lambda=$(aws cloudformation describe-stack-resources --stack-name "${stack_name}-lambda" --logical-resource-id LambdaFunction --query StackResources[0].PhysicalResourceId --output text)
  aws lambda update-function-code --function-name "${lambda}" --s3-bucket "${bucket}" --s3-key package.zip
  distribution_id=$(aws cloudformation describe-stack-resources --stack-name "${stack_name}-lambda" --logical-resource-id CloudFrontDistribution --query StackResources[0].PhysicalResourceId --output text)
  aws cloudfront create-invalidation --distribution-id "${distribution_id}" --paths '/*'
  aws cloudformation describe-stacks --stack-name "${stack_name}-lambda" --query Stacks[0].Outputs
}
main "$@"