SHELL:=/bin/bash
REQUIRED_BINARIES := kubectl cosign helm terraform kubectx kubecm ytt yq jq
WORKING_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
BOOTSTRAP_DIR := ${WORKING_DIR}/bootstrap
TERRAFORM_DIR := ${WORKING_DIR}/terraform
WORKLOAD_DIR := ${WORKING_DIR}/workloads
GITOPS_DIR := ${WORKING_DIR}/gitops
# Handy snippets for force-cleaning a stuck namespace (e.g. longhorn-system):
# kubectl api-resources --verbs=list --namespaced -o name | xargs -n 1 kubectl get --show-kind --ignore-not-found -n longhorn-system
# kubectl get namespace "longhorn-system" -o json \
#   | tr -d "\n" | sed "s/\"finalizers\": \[[^]]\+\]/\"finalizers\": []/" \
#   | kubectl replace --raw /api/v1/namespaces/longhorn-system/finalize -f -
# Harvester node iptables workaround (applied by the patch target below):
# sudo sysctl -w net.bridge.bridge-nf-call-iptables=0
HARVESTER_CONTEXT := "deathstar"
VSPHERE_NAME := "vsphere"
BASE_URL := sienarfleet.systems
GITEA_URL := git.$(BASE_URL)
GIT_ADMIN_PASSWORD="C4rb1De_S3cr4t"
CLOUDFLARE_TOKEN=""
CERT_MANAGER_VERSION=1.10.2
RANCHER_VERSION=2.7.8
CLOUD_TOKEN_FILE=/Volumes/BIGBOY/keys/cloud_dns_account_key.json
BITNAMI_KEYCLOAK_RELEASE=16.1.2
# Carbide info
CARBIDE_TOKEN_FILE=/Volumes/BIGBOY/keys/carbide.yaml
CARBIDE_USER := $(shell yq e .token_id ${CARBIDE_TOKEN_FILE})
CARBIDE_PASSWORD := $(shell yq e .token_password ${CARBIDE_TOKEN_FILE})
CARBIDE_LICENSE := $(shell yq e .license ${CARBIDE_TOKEN_FILE})
IMAGES_FILE=""
# Registry info
REGISTRY_URL := harbor.$(BASE_URL)
REGISTRY_USER=admin
REGISTRY_PASSWORD=
# Rancher on Harvester Info
RKE2_VIP=10.10.5.10
RANCHER_TARGET_NETWORK=services
RANCHER_URL := rancher.deathstar.${BASE_URL}
RANCHER_HA_MODE=true
RANCHER_CP_CPU_COUNT=4
RANCHER_CP_MEMORY_SIZE="8Gi"
RANCHER_WORKER_COUNT=0
RANCHER_NODE_SIZE="40Gi"
RANCHER_HARVESTER_WORKER_CPU_COUNT=4
RANCHER_HARVESTER_WORKER_MEMORY_SIZE="8Gi"
RANCHER_REPLICAS=3
HARVESTER_RANCHER_CLUSTER_NAME=rancher-harvester
RKE2_IMAGE_NAME=ubuntu-rke2-airgap-harvester
HARBOR_IMAGE_NAME=harbor-ubuntu
HARVESTER_RANCHER_CERT_SECRET=rancher_cert.yaml
HARVESTER_CERT_SECRET=harbor_cert.yaml
AIRGAP_IMAGE_HOST_IP=
# gitops automation vars
WORKLOADS_KAPP_APP_NAME=workloads
WORKLOADS_NAMESPACE=default
TARGET_CLUSTER=
RANDOM_PASSWORD := ${shell head /dev/urandom | LC_ALL=C tr -dc 'A-Za-z0-9' | head -c 13}
check-tools: ## Check to make sure you have the right tools
$(foreach exec,$(REQUIRED_BINARIES),\
$(if $(shell which $(exec)),,$(error "'$(exec)' not found. It is a dependency for this Makefile")))
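# e.g. `make check-tools` fails fast with "'helm' not found. It is a dependency for this Makefile"
# when any of the REQUIRED_BINARIES is missing from PATH.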
# certificate targets
# CloudDNS holder: kubectl create secret generic clouddns-dns01-solver-svc-acct --from-file=key.json
certs: check-tools # needs CLOUD_TOKEN_FILE set and HARVESTER_CONTEXT for non-default contexts
@printf "\n===>Making Certificates\n";
@kubectx ${HARVESTER_CONTEXT}
@helm install cert-manager ${BOOTSTRAP_DIR}/rancher/cert-manager-v1.8.1.tgz \
--namespace cert-manager \
--create-namespace \
--set installCRDs=true || true
@kubectl create secret generic clouddns-dns01-solver-svc-acct -n cert-manager --from-file=${CLOUD_TOKEN_FILE} --dry-run=client -o yaml | kubectl apply -f -
	@kubectl apply -f $(BOOTSTRAP_DIR)/certs/issuer-prod-clouddns.yaml
@kubectl create ns harbor --dry-run=client -o yaml | kubectl apply -f -
@ytt -f $(BOOTSTRAP_DIR)/certs/cert-harbor.yaml -v base_url=$(BASE_URL) | kubectl apply -f -
@ytt -f $(BOOTSTRAP_DIR)/certs/cert-wildcard.yaml -v base_url=$(BASE_URL) | kubectl apply -f -
@ytt -f $(BOOTSTRAP_DIR)/certs/cert-svc-wildcard.yaml -v base_url=$(BASE_URL) | kubectl apply -f -
@kubectl create ns git --dry-run=client -o yaml | kubectl apply -f -
@ytt -f $(BOOTSTRAP_DIR)/certs/cert-gitea.yaml -v base_url=$(BASE_URL) | kubectl apply -f -
@ytt -f $(BOOTSTRAP_DIR)/certs/cert-rancherdeathstar.yaml -v base_url=$(BASE_URL) | kubectl apply -f -
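# Typical invocation (assumes the "deathstar" kubeconfig context exists and CLOUD_TOKEN_FILE points
# at a CloudDNS service-account key):
#   make certs CLOUD_TOKEN_FILE=/path/to/key.json
# Note: this installs the bundled cert-manager-v1.8.1.tgz chart; CERT_MANAGER_VERSION above is not
# consulted here.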
certs-export: check-tools
@printf "\n===>Exporting Certificates\n";
@kubectx ${HARVESTER_CONTEXT}
@kubectl get secret -n harbor harbor-prod-homelab-certificate -o yaml > $(HARVESTER_CERT_SECRET) || true
@kubectl get secret -n git gitea-prod-certificate -o yaml > gitea_cert.yaml || true
@kubectl get secret wildcard-prod-certificate -o yaml > wildcard_cert.yaml || true
@kubectl get secret wildcard-svc-certificate -o yaml > wildcard_svc_cert.yaml || true
@kubectl get secret -n cattle-system tls-rancherdeathstar-ingress -o yaml | yq e '.metadata.name = "tls-rancher-ingress"' > rancherdeathstar_cert.yaml || true
certs-import: check-tools
@printf "\n===>Importing Certificates\n";
@kubectx ${HARVESTER_CONTEXT}
@kubectl apply -f harbor_cert.yaml
@kubectl apply -f gitea_cert.yaml
keycloak: check-tools
@printf "\n===>Deploying Keycloak\n";
@kubectx ${HARVESTER_CONTEXT}
@helm upgrade --install keycloak -n keycloak --create-namespace -f ${BOOTSTRAP_DIR}/keycloak/values.yaml ${BOOTSTRAP_DIR}/keycloak/keycloak-${BITNAMI_KEYCLOAK_RELEASE}.tgz
	@kubectl get secret wildcard-prod-certificate -o json | jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' | kubectl apply --namespace=keycloak -f -
	@kubectl patch ingress keycloak -n keycloak -p '{"spec":{"tls":[{"hosts":["keycloak.$(BASE_URL)"],"secretName":"wildcard-prod-certificate"}]}}'
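# The jq del(.metadata[...]) pipeline above copies the wildcard TLS secret into the keycloak
# namespace by stripping the fields that pin it to its source namespace; the vault target below
# reuses the same trick.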
# airgap targets
pull-rancher: check-tools
@printf "\n===>Pulling Rancher Images\n";
@${BOOTSTRAP_DIR}/airgap_images/pull_carbide_rancher $(CARBIDE_USER) '$(CARBIDE_PASSWORD)' '$(RANCHER_VERSION)' '${BOOTSTRAP_DIR}/airgap_images/blacklist'
@printf "\nIf successful, your images will be available at /tmp/rancher-images.tar.gz and /tmp/cert-manager.tar.gz"
pull-misc: check-tools
@printf "\n===>Pulling Misc Images\n";
@${BOOTSTRAP_DIR}/airgap_images/pull_misc
push-images: check-tools
@printf "\n===>Pushing Images to Harbor\n";
	@${BOOTSTRAP_DIR}/airgap_images/push_carbide $(REGISTRY_URL) $(REGISTRY_USER) '$(REGISTRY_PASSWORD)' $(IMAGES_FILE)
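# Example (hypothetical password; IMAGES_FILE comes from pull-rancher / pull-misc above):
#   make push-images IMAGES_FILE=/tmp/rancher-images.tar.gz REGISTRY_PASSWORD='<harbor admin password>'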
# registry targets
registry: check-tools
@printf "\n===> Installing Registry\n";
@kubectx $(HARVESTER_CONTEXT)
@helm upgrade --install harbor ${BOOTSTRAP_DIR}/harbor/harbor-1.9.3.tgz \
--version 1.9.3 -n harbor -f ${BOOTSTRAP_DIR}/harbor/values.yaml --create-namespace
registry-delete: check-tools
@printf "\n===> Deleting Registry\n";
@kubectx $(HARVESTER_CONTEXT)
@helm delete harbor -n harbor
# git targets
git: check-tools
@kubectx ${HARVESTER_CONTEXT}
@helm upgrade --install gitea $(BOOTSTRAP_DIR)/gitea/gitea-6.0.1.tgz \
--namespace git \
--set gitea.admin.password=$(GIT_ADMIN_PASSWORD) \
--set gitea.admin.username=gitea \
--set persistence.size=10Gi \
--set postgresql.persistence.size=1Gi \
--set gitea.config.server.ROOT_URL=https://$(GITEA_URL) \
--set gitea.config.server.DOMAIN=$(GITEA_URL) \
--set gitea.config.server.PROTOCOL=http \
-f $(BOOTSTRAP_DIR)/gitea/values.yaml
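# Once deployed, Gitea should answer at https://$(GITEA_URL); TLS is expected to terminate at the
# ingress, which is why PROTOCOL stays http here.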
git-delete: check-tools
@kubectx $(HARVESTER_CONTEXT)
@printf "\n===> Deleting Gitea\n";
@helm delete gitea -n git
# vault targets
vault: check-tools
@kubectx ${HARVESTER_CONTEXT}
@helm upgrade --install vault $(BOOTSTRAP_DIR)/vault/vault-0.25.0.tgz \
--namespace vault \
--create-namespace \
-f $(BOOTSTRAP_DIR)/vault/values.yaml
@kubectl get secret wildcard-prod-certificate -o json | jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' | kubectl apply --namespace=vault -f -
# helm upgrade --install vault-secrets-operator $(BOOTSTRAP_DIR)/vault/secrets-operator/vault-secrets-operator-0.1.0.tgz \
# --namespace vault-secrets-operator \
# --create-namespace \
# -f $(BOOTSTRAP_DIR)/vault/secrets-operator/values.yaml
### terraform main targets
_HARBOR_KEY=$(shell kubectl get secret -n harbor harbor-prod-homelab-certificate -o yaml | yq -e '.data."tls.key"' -)
_HARBOR_CERT=$(shell kubectl get secret -n harbor harbor-prod-homelab-certificate -o yaml | yq -e '.data."tls.crt"' -)
infra: check-tools
@printf "\n=====> Terraforming Infra\n";
@kubectx ${HARVESTER_CONTEXT}
@$(MAKE) _terraform-init COMPONENT=infra VARS='TF_VAR_harbor_url="$(REGISTRY_URL)" TF_VAR_ubuntu_image_name=$(RKE2_IMAGE_NAME) TF_VAR_host_ip=$(AIRGAP_IMAGE_HOST_IP) TF_VAR_port=9900'
@$(MAKE) _terraform COMPONENT=infra VARS='TF_VAR_harbor_url="$(REGISTRY_URL)" TF_VAR_ubuntu_image_name=$(RKE2_IMAGE_NAME) TF_VAR_host_ip=$(AIRGAP_IMAGE_HOST_IP) TF_VAR_port=9900'
@kubectl create ns services || true
@kubectl create ns dev || true
@kubectl create ns prod || true
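# AIRGAP_IMAGE_HOST_IP is empty by default and feeds TF_VAR_host_ip, so supply it explicitly, e.g.:
#   make infra AIRGAP_IMAGE_HOST_IP=10.10.0.50
# (10.10.0.50 is a placeholder; use whichever host serves your airgapped images on port 9900.)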
jumpbox: check-tools
@printf "\n====> Terraforming Jumpbox\n";
@kubectx ${HARVESTER_CONTEXT}
@$(MAKE) _terraform COMPONENT=jumpbox
jumpbox-key: check-tools
@printf "\n====> Grabbing generated SSH key\n";
@kubectx ${HARVESTER_CONTEXT}
@$(MAKE) _terraform-value COMPONENT=jumpbox FIELD=".jumpbox_ssh_key.value"
jumpbox-destroy: check-tools
@printf "\n====> Destroying Jumpbox\n";
@kubectx ${HARVESTER_CONTEXT}
@$(MAKE) _terraform-destroy COMPONENT=jumpbox
image: check-tools
@printf "\n=====> Downloading Airgapped Image\n";
@kubectx ${HARVESTER_CONTEXT}
@$(MAKE) _terraform COMPONENT=image VARS='TF_VAR_host_ip=$(AIRGAP_IMAGE_HOST_IP) TF_VAR_port=9900 TF_VAR_image_name=$(RKE2_IMAGE_NAME)'
rancher: check-tools # state stored in Harvester K8S
@printf "\n====> Terraforming RKE2 + Rancher\n";
@kubecm delete $(HARVESTER_RANCHER_CLUSTER_NAME) > /dev/null 2>&1 || true
@kubectx ${HARVESTER_CONTEXT}
@$(MAKE) _terraform COMPONENT=rancher VARS='TF_VAR_carbide_username="$(CARBIDE_USER)" TF_VAR_carbide_password="$(CARBIDE_PASSWORD)" TF_VAR_rancher_server_dns="$(RANCHER_URL)" TF_VAR_master_vip="$(RKE2_VIP)" TF_VAR_registry_url="$(REGISTRY_URL)" TF_VAR_control_plane_cpu_count=$(RANCHER_CP_CPU_COUNT) TF_VAR_control_plane_memory_size=$(RANCHER_CP_MEMORY_SIZE) TF_VAR_worker_count=$(RANCHER_WORKER_COUNT) TF_VAR_control_plane_ha_mode=$(RANCHER_HA_MODE) TF_VAR_node_disk_size=$(RANCHER_NODE_SIZE) TF_VAR_worker_cpu_count=$(RANCHER_HARVESTER_WORKER_CPU_COUNT) TF_VAR_worker_memory_size=$(RANCHER_HARVESTER_WORKER_MEMORY_SIZE) TF_VAR_target_network_name=$(RANCHER_TARGET_NETWORK) TF_VAR_harvester_rke2_image_name=${shell kubectl get virtualmachineimage -o yaml | yq -e '.items[]|select(.spec.displayName=="$(RKE2_IMAGE_NAME)")' - | yq -e '.metadata.name' -}'
@cp ${TERRAFORM_DIR}/rancher/kube_config_server.yaml /tmp/$(HARVESTER_RANCHER_CLUSTER_NAME).yaml && kubecm add -c -f /tmp/$(HARVESTER_RANCHER_CLUSTER_NAME).yaml && rm /tmp/$(HARVESTER_RANCHER_CLUSTER_NAME).yaml
@kubectl get secret -n cattle-system tls-rancherdeathstar-ingress -o yaml | yq e '.metadata.name = "tls-rancher-ingress"' > $(HARVESTER_RANCHER_CERT_SECRET)
@kubectx $(HARVESTER_RANCHER_CLUSTER_NAME)
	@helm upgrade --install rancher -n cattle-system --create-namespace \
		--set hostname=$(RANCHER_URL) \
		--set replicas=$(RANCHER_REPLICAS) \
		--set bootstrapPassword=admin \
		--set rancherImage=$(REGISTRY_URL)/rancher/rancher \
		--set "carbide.whitelabel.image=$(REGISTRY_URL)/carbide/carbide-whitelabel" \
		--set systemDefaultRegistry=$(REGISTRY_URL) \
		--set ingress.tls.source=secret \
		--set useBundledSystemChart=true \
		$(BOOTSTRAP_DIR)/rancher/carbide-rancher-v$(RANCHER_VERSION).tgz
@cat $(HARVESTER_RANCHER_CERT_SECRET) | yq e '.metadata.resourceVersion = null' | yq e '.metadata.uid = null' | kubectl apply -f - || true
@kubectl create ns fleet-default || true
@kubectl create secret generic --type kubernetes.io/basic-auth carbide-registry -n fleet-default --from-literal=username=${CARBIDE_USER} --from-literal=password=${CARBIDE_PASSWORD} --dry-run=client -o yaml | kubectl apply -f - 2>&1 | grep -i -v "Warn" | grep -i -v "Deprecat"
@until [ $$(curl -sk https://${RANCHER_URL}/v3-public/authtokens | grep uuid | wc -l) = 1 ]; do sleep 2; echo -e -n "."; done
@printf "Rancher installed, now onto bootstrapping...\n";
@$(MAKE) rancher-bootstrap
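# Flow: terraform provisions RKE2 on Harvester, kubecm merges the generated kubeconfig under
# $(HARVESTER_RANCHER_CLUSTER_NAME), the carbide-rancher chart is installed from the registry,
# and the loop against /v3-public/authtokens waits for the API before rancher-bootstrap runs.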
rancher-bootstrap:
@printf "\n====> Bootstrapping Rancher\n";
@kubectx $(HARVESTER_RANCHER_CLUSTER_NAME)
@printf "\nRancher password: ${RANDOM_PASSWORD}\n";
@curl -sk https://${RANCHER_URL}/v3/users?action=changepassword -H 'content-type: application/json' -H "Authorization: Bearer $$(curl -sk -X POST https://${RANCHER_URL}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"admin"}' | jq -r '.token')" -d '{"currentPassword":"admin","newPassword":"'${RANDOM_PASSWORD}'"}' > /dev/null 2>&1
@curl -sk https://${RANCHER_URL}/v3/settings/server-url -H 'content-type: application/json' -H "Authorization: Bearer $$(curl -sk -X POST https://${RANCHER_URL}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"${RANDOM_PASSWORD}"}' | jq -r '.token')" -X PUT -d '{"name":"server-url","value":"https://${RANCHER_URL}"}' > /dev/null 2>&1
@curl -sk https://${RANCHER_URL}/v3/settings/telemetry-opt -X PUT -H 'content-type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $$(curl -sk -X POST https://${RANCHER_URL}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"${RANDOM_PASSWORD}"}' | jq -r '.token')" -d '{"value":"out"}' > /dev/null 2>&1
@curl -sk https://${RANCHER_URL}/v1/catalog.cattle.io.clusterrepos -H 'content-type: application/json' -H "Authorization: Bearer $$(curl -sk -X POST https://${RANCHER_URL}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"${RANDOM_PASSWORD}"}' | jq -r '.token')" -d '{"type":"catalog.cattle.io.clusterrepo","metadata":{"name":"rancher-ui-plugins"},"spec":{"gitBranch":"main","gitRepo":"https://github.com/rancher/ui-plugin-charts"}}' > /dev/null 2>&1
@curl -sk https://${RANCHER_URL}/v1/catalog.cattle.io.clusterrepos/rancher-charts?action=install -H 'content-type: application/json' -H "Authorization: Bearer $$(curl -sk -X POST https://${RANCHER_URL}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"${RANDOM_PASSWORD}"}' | jq -r '.token')" -d '{"charts":[{"chartName":"ui-plugin-operator-crd","version":"102.0.1+up0.2.1","releaseName":"ui-plugin-operator-crd","annotations":{"catalog.cattle.io/ui-source-repo-type":"cluster","catalog.cattle.io/ui-source-repo":"rancher-charts"}}],"wait":true,"namespace":"cattle-ui-plugin-system"}'
@curl -sk https://${RANCHER_URL}/v1/catalog.cattle.io.clusterrepos/rancher-charts?action=install -H 'content-type: application/json' -H "Authorization: Bearer $$(curl -sk -X POST https://${RANCHER_URL}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"${RANDOM_PASSWORD}"}' | jq -r '.token')" -d '{"charts":[{"chartName":"ui-plugin-operator","version":"102.0.1+up0.2.1","releaseName":"ui-plugin-operator","annotations":{"catalog.cattle.io/ui-source-repo-type":"cluster","catalog.cattle.io/ui-source-repo":"rancher-charts"}}],"wait":true,"namespace":"cattle-ui-plugin-system"}'
@kubectl create ns carbide-stigatron-system --dry-run=client -o yaml | kubectl apply -f -
@sleep 10
@helm upgrade --install -n carbide-stigatron-system --create-namespace stigatron-ui --no-hooks --set global.cattle.systemDefaultRegistry=$(REGISTRY_URL) $(BOOTSTRAP_DIR)/rancher/stigatron-ui-0.1.22.tgz
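# Note: the admin password is rotated from the bootstrap value "admin" to RANDOM_PASSWORD, which is
# regenerated on every make invocation; record the "Rancher password:" line printed above, since
# later targets (e.g. cloud-provider-creds) need it as PASSWORD.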
rancher-delete: rancher-destroy
rancher-destroy: check-tools
@printf "\n====> Destroying RKE2 + Rancher\n";
@kubectx ${HARVESTER_CONTEXT}
@MACHINE_NAME=$$(kubectl get virtualmachineimage -o yaml | yq -e '.items[]|select(.spec.displayName=="$(RKE2_IMAGE_NAME)")' - | yq -e '.metadata.name' -); set -eux;\
$(MAKE) _terraform-destroy COMPONENT=rancher VARS='TF_VAR_carbide_username="$(CARBIDE_USER)" TF_VAR_carbide_password="$(CARBIDE_PASSWORD)" TF_VAR_target_network_name=$(RANCHER_TARGET_NETWORK) TF_VAR_harvester_rke2_image_name='"$$MACHINE_NAME"''
@kubecm delete $(HARVESTER_RANCHER_CLUSTER_NAME) || true
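# PASSWORD is not set anywhere in this Makefile; pass the admin password printed by
# rancher-bootstrap on the command line, e.g.:
#   make cloud-provider-creds PASSWORD='<rancher admin password>'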
cloud-provider-creds: check-tools
@printf "\n===> Creating Cloud Provider creds for all nodes\n";
@kubectx ${HARVESTER_RANCHER_CLUSTER_NAME}
@curl -sk -X POST https://$(RANCHER_URL)/k8s/clusters/$$(kubectl get clusters.management.cattle.io -o yaml | yq e '.items[] | select(.metadata.labels."provider.cattle.io" == "harvester")'.metadata.name)/v1/harvester/kubeconfig \
-H 'Content-Type: application/json' \
-H "Authorization: Bearer $(shell curl -sk -X POST https://${RANCHER_URL}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"${PASSWORD}"}' | jq -r '.token')" \
-d '{"clusterRoleName": "harvesterhci.io:cloudprovider", "namespace": "default", "serviceAccountName": "deathstar"}' | xargs | sed 's/\\n/\n/g' > deathstar-kubeconfig
@kubectl create secret generic services-shared-cloudprovider -n fleet-default --from-file=credential=deathstar-kubeconfig --dry-run=client -o yaml | kubectl apply -f -
@kubectl create secret generic devfluffymunchkin-cloudprovider -n fleet-default --from-file=credential=deathstar-kubeconfig --dry-run=client -o yaml | kubectl apply -f -
@kubectl create secret generic devedgerunner-cloudprovider -n fleet-default --from-file=credential=deathstar-kubeconfig --dry-run=client -o yaml | kubectl apply -f -
@kubectl create secret generic prodblue-cloudprovider -n fleet-default --from-file=credential=deathstar-kubeconfig --dry-run=client -o yaml | kubectl apply -f -
@kubectl annotate secret services-shared-cloudprovider -n fleet-default --overwrite v2prov-secret-authorized-for-cluster='services-shared'
@kubectl annotate secret devedgerunner-cloudprovider -n fleet-default --overwrite v2prov-secret-authorized-for-cluster='dev-edgerunner'
@kubectl annotate secret devfluffymunchkin-cloudprovider -n fleet-default --overwrite v2prov-secret-authorized-for-cluster='dev-fluffymunchkin'
@kubectl annotate secret prodblue-cloudprovider -n fleet-default --overwrite v2prov-secret-authorized-for-cluster='prod-blue'
@rm deathstar-kubeconfig
wildcard-cert: check-tools
@kubectx ${HARVESTER_CONTEXT}
@kubectl get secret wildcard-svc-certificate -o yaml > wildcard_svc_cert.yaml
@printf "\nCert available here: wildcard_svc_cert.yaml\n";
# gitops targets
# this only works if harvester cluster has been imported
_HARVESTER_CLUSTER_NAME = $(shell kubectl get cluster $(HARVESTER_CONTEXT) -n fleet-default -o yaml | yq -e '.status.clusterName' | tr -d '\n' | base64)
_HARVESTER_SECRET_NAME = $(shell kubectl get secret -n cattle-global-data -o yaml | yq -e '.items[] | select(.data.harvestercredentialConfig-clusterId == '\"$(_HARVESTER_CLUSTER_NAME)\"')' | yq -e .metadata.name)
_VSPHERE_SECRET_NAME = $(shell kubectl get secret -n cattle-global-data -o yaml | yq -e '.items[] | select(.metadata.annotations."field.cattle.io/name" == '\"$(VSPHERE_NAME)\"')' | yq -e .metadata.name)
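# _HARVESTER_SECRET_NAME / _VSPHERE_SECRET_NAME look up the generated cloud-credential secrets in
# cattle-global-data by Harvester cluster ID and vSphere display name, so workloads-yes can clone
# them under the stable names $(HARVESTER_CONTEXT) and $(VSPHERE_NAME).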
workloads-check: check-tools
@printf "\n===> Synchronizing Workloads with Fleet (dry-run)\n";
@kubectx $(HARVESTER_RANCHER_CLUSTER_NAME)
@ytt -f $(WORKLOAD_DIR) | kapp deploy -a $(WORKLOADS_KAPP_APP_NAME) -n $(WORKLOADS_NAMESPACE) -f -
workloads-yes: check-tools
@printf "\n===> Synchronizing Workloads with Fleet\n";
@kubectx ${HARVESTER_RANCHER_CLUSTER_NAME}
@kubectl get secret -n cattle-global-data $(HARVESTER_CONTEXT) || kubectl get secret -n cattle-global-data $(_HARVESTER_SECRET_NAME) -o yaml | yq -e '.metadata.name = $(HARVESTER_CONTEXT)' | yq -e '.metadata.annotations."field.cattle.io/name" = $(HARVESTER_CONTEXT)' - | kubectl apply -f -
@kubectl get secret -n cattle-global-data $(VSPHERE_NAME) || kubectl get secret -n cattle-global-data $(_VSPHERE_SECRET_NAME) -o yaml | yq -e '.metadata.name = $(VSPHERE_NAME)' | kubectl apply -f -
@ytt -f $(WORKLOAD_DIR) | kapp deploy -a $(WORKLOADS_KAPP_APP_NAME) -n $(WORKLOADS_NAMESPACE) -f - -y
workloads-destroy: workloads-delete
workloads-delete: check-tools
@printf "\n===> Deleting Workloads with Fleet\n";
@kubectx $(HARVESTER_RANCHER_CLUSTER_NAME)
@kapp delete -a $(WORKLOADS_KAPP_APP_NAME) -n $(WORKLOADS_NAMESPACE)
status: check-tools
@printf "\n===> Inspecting Running Workloads in Fleet\n";
@kubectx ${HARVESTER_RANCHER_CLUSTER_NAME}
@kapp inspect -a $(WORKLOADS_KAPP_APP_NAME) -n $(WORKLOADS_NAMESPACE)
NODE1_IP=10.10.0.11
NODE2_IP=10.10.0.12
NODE3_IP=10.10.0.13
patch: check-tools
@printf "\n===> Patching Harvester Nodes\n"
@printf "\n**** iptable Bug\n"
@ssh rancher@${NODE1_IP} "sudo sysctl -w net.bridge.bridge-nf-call-iptables=0"
@ssh rancher@${NODE2_IP} "sudo sysctl -w net.bridge.bridge-nf-call-iptables=0"
@ssh rancher@${NODE3_IP} "sudo sysctl -w net.bridge.bridge-nf-call-iptables=0"
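# Applies the net.bridge.bridge-nf-call-iptables workaround noted at the top of this file to each
# Harvester node over SSH; adjust NODE1_IP..NODE3_IP above if your cluster layout differs.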
# downstream
carbide-license: check-tools
@printf "\n===>Creating Carbide License\n";
@printf "Copy-paste this into your target cluster shell:\nkubectl create namespace carbide-stigatron-system; kubectl create secret generic stigatron-license -n carbide-stigatron-system --from-literal=license=${CARBIDE_LICENSE} --dry-run=client -o yaml | kubectl apply -f -\n"
# terraform sub-targets (don't use directly)
_terraform: check-tools
@kubectx ${HARVESTER_CONTEXT}
@$(VARS) terraform -chdir=${TERRAFORM_DIR}/$(COMPONENT) apply
_terraform-init: check-tools
@kubectx ${HARVESTER_CONTEXT}
@$(VARS) terraform -chdir=${TERRAFORM_DIR}/$(COMPONENT) init
_terraform-apply: check-tools
@kubectx ${HARVESTER_CONTEXT}
@$(VARS) terraform -chdir=${TERRAFORM_DIR}/$(COMPONENT) apply
_terraform-value: check-tools
@kubectx ${HARVESTER_CONTEXT}
	@terraform -chdir=${TERRAFORM_DIR}/$(COMPONENT) output -json | jq -r '$(FIELD)'
_terraform-destroy: check-tools
@kubectx ${HARVESTER_CONTEXT}
@$(VARS) terraform -chdir=${TERRAFORM_DIR}/$(COMPONENT) destroy
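# These helpers are invoked from the main targets as, e.g.:
#   $(MAKE) _terraform COMPONENT=jumpbox
#   $(MAKE) _terraform-value COMPONENT=jumpbox FIELD=".jumpbox_ssh_key.value"
# VARS (optional) is a space-separated list of TF_VAR_* assignments prepended to the terraform call.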