# Deploy ARO and install extension
# az provider register -n Microsoft.RedHatOpenShift --wait
# az extension add -n aro --index https://az.aroapp.io/preview
# Control
ingress_visibility=Public
api_visibility=Public
custom_domain=no
domain=cloudtrooper.net # If using custom_domain=yes. Assumes you own this domain and have it managed by Azure DNS
nat_gateway=no # Just for testing, this is not supported
# Variables
rg=aro
cluster_name=aro
cluster_rg=${cluster_name}-$RANDOM
location=northeurope
vnet_name=arovnet
vnet_prefix=192.168.0.0/24
workers_subnet_name=workers
workers_subnet_prefix=192.168.0.0/27
masters_subnet_name=masters
masters_subnet_prefix=192.168.0.32/27
appgw_subnet_name=ApplicationGatewaySubnet
appgw_subnet_prefix=192.168.0.96/28
vm_subnet_name=vm
vm_subnet_prefix=192.168.0.80/28
sql_subnet_name=sql
sql_subnet_prefix=192.168.0.112/28
ilb_subnet_name=apps
ilb_subnet_prefix=192.168.0.128/28
anf_subnet_name=anf
anf_subnet_prefix=192.168.0.192/27
anf_location=$location
pls_subnet_name=PrivateLinkService
pls_subnet_prefix=192.168.0.64/28
master_vm_size=Standard_D8s_v3
worker_vm_size=Standard_D4s_v3
worker_vm_count=3
pod_cidr=10.128.0.0/14
service_cidr=172.30.0.0/16
arc_name=arcaro
azmonitor_usearc=yes
# Create RG if it does not exist
rg_location=$(az group show -n $rg --query location -o tsv 2>/dev/null)
if [[ -z "$rg_location" ]]
then
echo "Creating RG ${rg} in ${location}..."
az group create -n $rg -l $location
else
if [[ ${rg_location} == ${location} ]]
then
echo "RG $rg already exists in $rg_location"
else
echo "RG $rg already exists, but in $rg_location instead of the specified location $location"
fi
fi
# Create vnet if it does not exist
vnet_location=$(az network vnet show -n $vnet_name -g $rg --query location -o tsv 2>/dev/null)
if [[ -z "${vnet_location}" ]]
then
echo "Creating Virtual Network..."
az network vnet create -n $vnet_name -g $rg --address-prefixes $vnet_prefix --subnet-name $workers_subnet_name --subnet-prefixes $workers_subnet_prefix -o none
az network vnet subnet create -n $masters_subnet_name --vnet-name $vnet_name -g $rg --address-prefixes $masters_subnet_prefix -o none
az network vnet subnet update -n $masters_subnet_name -g $rg --vnet-name $vnet_name --disable-private-link-service-network-policies true -o none
masters_subnet_id=$(az network vnet subnet show -g $rg --vnet-name $vnet_name -n $masters_subnet_name --query id -o tsv)
az network vnet subnet update -n $masters_subnet_name -g $rg --vnet-name $vnet_name --service-endpoints Microsoft.ContainerRegistry -o none
az network vnet subnet update -n $workers_subnet_name -g $rg --vnet-name $vnet_name --service-endpoints Microsoft.ContainerRegistry -o none
else
echo "Vnet $vnet_name already exists"
fi
if [[ "$nat_gateway" == "yes" ]]
then
echo "Creating NAT Gateway..."
az network public-ip create -n nat-pip -g $rg --sku Standard -o none
az network nat gateway create -n aronatgw -g $rg \
--public-ip-addresses nat-pip --idle-timeout 10 -o none
az network vnet subnet update -n $workers_subnet_name --vnet-name $vnet_name -g $rg --nat-gateway aronatgw -o none
# az network vnet subnet update -n $masters_subnet_name --vnet-name $vnet_name -g $rg --nat-gateway aronatgw
fi
# Service Principal: retrieve from AKV
purpose=aro
keyvault_name=erjositoKeyvault
keyvault_appid_secret_name=$purpose-sp-appid
keyvault_password_secret_name=$purpose-sp-secret
sp_app_id=$(az keyvault secret show --vault-name $keyvault_name -n $keyvault_appid_secret_name --query 'value' -o tsv)
sp_app_secret=$(az keyvault secret show --vault-name $keyvault_name -n $keyvault_password_secret_name --query 'value' -o tsv)
# If no SP could be retrieved from AKV, create one
if [[ -z "$sp_app_id" ]] || [[ -z "$sp_app_secret" ]]
then
echo "Creating service principal credentials and storing in AKV $keyvault_name..."
purpose=aro
keyvault_name=erjositoKeyvault
keyvault_appid_secret_name=$purpose-sp-appid
keyvault_password_secret_name=$purpose-sp-secret
sp_name=$purpose
sp_output=$(az ad sp create-for-rbac --name $sp_name --skip-assignment)
sp_app_id=$(echo $sp_output | jq -r '.appId')
sp_app_secret=$(echo $sp_output | jq -r '.password')
az keyvault secret set --vault-name $keyvault_name --name $keyvault_appid_secret_name --value $sp_app_id -o none
az keyvault secret set --vault-name $keyvault_name --name $keyvault_password_secret_name --value $sp_app_secret -o none
else
echo "Service principal credentials successfully retrieved from AKV $keyvault_name"
fi
# Assign permissions to the vnet for the SP
vnet_id=$(az network vnet show -n $vnet_name -g $rg --query id -o tsv)
echo "Assigning Network Contributor role to SP $sp_app_id..."
az role assignment create --scope $vnet_id --assignee $sp_app_id --role 'Network Contributor' -o none
# Show credentials expiration date
sp_credentials_enddate=$(az ad sp credential list --id $sp_app_id --query '[].endDate' -o tsv)
echo "Service principal credentials expiration: $sp_credentials_enddate"
# Get pull secret
akv_name=joseakv-airs
akv_secret_name=openshift-pull-secret
pull_secret=$(az keyvault secret show -n $akv_secret_name --vault-name $akv_name --query value -o tsv)
if [[ -z "${pull_secret}" ]]
then
echo "Pull secret could not be retrieved from AKV $akv_name"
pullsecret_flag=""
else
echo "Pull secret successfully retrieved from AKV $akv_name"
pullsecret_flag="--pull-secret $pull_secret"
fi
# Optionally add the flag for the custom domain
if [[ "${custom_domain}" == "yes" ]]
then
domain_flag="--domain $domain"
else
domain_flag=""
fi
# Create cluster (detailed)
# The '(z)' trick for the variable flags is zsh-specific
echo "Creating ARO cluster, this is going to take some minutes..."
az aro create -n $cluster_name -g $rg --worker-subnet $workers_subnet_name --master-subnet $masters_subnet_name --vnet $vnet_name \
--master-vm-size $master_vm_size --worker-vm-size $worker_vm_size --worker-count $worker_vm_count \
--worker-vm-disk-size-gb 128 \
--client-id $sp_app_id --client-secret $sp_app_secret \
--ingress-visibility $ingress_visibility --apiserver-visibility $api_visibility \
--tags sampletag1=value1 sampletag2=value2 \
--cluster-resource-group $cluster_rg \
--pod-cidr $pod_cidr --service-cidr $service_cidr ${(z)domain_flag} ${(z)pullsecret_flag} -o none
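# Optional check (sketch, not part of the original flow): confirm the cluster finished
# provisioning and note the installed OpenShift version
az aro show -n $cluster_name -g $rg --query provisioningState -o tsv
az aro show -n $cluster_name -g $rg --query clusterProfile.version -o tsv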
#############
# Cleanup #
#############
function cleanup_aro {
if [[ "$nat_gateway" == "yes" ]]
then
echo "Cleaning up nat gateway..."
az network vnet subnet update -n $workers_subnet_name --vnet-name $vnet_name -g $rg --nat-gateway ""
az network vnet subnet update -n $masters_subnet_name --vnet-name $vnet_name -g $rg --nat-gateway ""
az network nat gateway delete -n aronatgw -g $rg
az network public-ip delete -n nat-pip -g $rg
fi
echo "Deleting ARO cluster..."
az aro delete -n $cluster_name -g $rg -y --no-wait
}
###################
# DNS setup #
###################
# Configure DNS if custom_domain was selected and API visibility is public
if [[ "${custom_domain}" == "yes" ]] && [[ "$api_visibility" == "Public" ]]
then
dns_zone_name=cloudtrooper.net
dns_subdomain=aro
dns_console_hostname=console-openshift-console.apps
dns_oauth_hostname=oauth-openshift.apps
dns_api_hostname=api
dns_zone_rg=$(az network dns zone list --query "[?name=='$dns_zone_name'].resourceGroup" -o tsv)
aro_api_ip=$(az aro show -n $cluster_name -g $rg --query 'apiserverProfile.ip' -o tsv)
aro_ingress_ip=$(az aro show -n $cluster_name -g $rg --query 'ingressProfiles[0].ip' -o tsv)
dns_console_fqdn=$dns_console_hostname.$dns_subdomain.$dns_zone_name
dns_oauth_fqdn=$dns_oauth_hostname.$dns_subdomain.$dns_zone_name
dns_api_fqdn=$dns_api_hostname.$dns_subdomain.$dns_zone_name
echo "Adding A record $dns_console_fqdn for IP $aro_ingress_ip"
az network dns record-set a delete -z $dns_zone_name -g $dns_zone_rg -n $dns_console_hostname.$dns_subdomain -y
az network dns record-set a add-record -z $dns_zone_name -g $dns_zone_rg -n $dns_console_hostname.$dns_subdomain -a $aro_ingress_ip
echo "Adding A record $dns_api_fqdn for IP $aro_api_ip"
az network dns record-set a delete -z $dns_zone_name -g $dns_zone_rg -n $dns_api_hostname.$dns_subdomain -y
az network dns record-set a add-record -z $dns_zone_name -g $dns_zone_rg -n $dns_api_hostname.$dns_subdomain -a $aro_api_ip
echo "Adding A record $dns_oauth_fqdn for IP $aro_ingress_ip"
az network dns record-set a delete -z $dns_zone_name -g $dns_zone_rg -n $dns_oauth_hostname.$dns_subdomain -y
az network dns record-set a add-record -z $dns_zone_name -g $dns_zone_rg -n $dns_oauth_hostname.$dns_subdomain -a $aro_ingress_ip
nslookup $dns_console_fqdn
nslookup $dns_oauth_fqdn
nslookup $dns_api_fqdn
# Verify records
az network dns record-set list -z $dns_zone_name -g $dns_zone_rg -o table
echo "A records for $dns_api_fqdn:"
az network dns record-set a show -z $dns_zone_name -g $dns_zone_rg -n $dns_api_hostname.$dns_subdomain --query arecords -o table
echo "A records for $dns_console_fqdn:"
az network dns record-set a show -z $dns_zone_name -g $dns_zone_rg -n $dns_console_hostname.$dns_subdomain --query arecords -o table
echo "A records for $dns_oauth_fqdn"
az network dns record-set a show -z $dns_zone_name -g $dns_zone_rg -n $dns_oauth_hostname.$dns_subdomain --query arecords -o table
else
echo "No custom domain specified, no DNS records need to be added"
fi
###################
# Login #
###################
# Credentials
# az aro list-credentials -n $cluster_name -g $rg
aro_usr=$(az aro list-credentials -n $cluster_name -g $rg --query kubeadminUsername -o tsv)
aro_pwd=$(az aro list-credentials -n $cluster_name -g $rg --query kubeadminPassword -o tsv)
aro_api_url=$(az aro show -n $cluster_name -g $rg --query 'apiserverProfile.url' -o tsv)
oc login $aro_api_url -u $aro_usr -p $aro_pwd
# echo "Login with the command \"oc login $aro_api_url -u $aro_usr -p $aro_pwd\""
# oc login $aro_api_url -u $aro_usr -p $aro_pwd --insecure-skip-tls-verify=true
# echo "$aro_usr / $aro_pwd"
# Console
aro_console_url=$(az aro show -n $cluster_name -g $rg --query 'consoleProfile.url' -o tsv)
echo "Connect to $aro_console_url (username kubeadmin, password $aro_pwd)"
# URLs:
# echo "API: $aro_api_url"
# echo "Console: $aro_console_url"
##################################
# Deploy sample app (public API) #
##################################
# Create pods
project_name=kuard
oc new-project $project_name
oc new-app --docker-image gcr.io/kuar-demo/kuard-amd64:1
# Expose with clusterIP and router
oc expose deploy kuard-amd64 --port 8080 --name kuard
oc expose svc kuard --name kuard
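# Quick test of the route (sketch): grab the route hostname and send a request. Assumes the
# default public ingress and that the route host resolves from this machine.
kuard_host=$(oc get route kuard -o jsonpath='{.spec.host}') && echo $kuard_host
curl -sI "http://${kuard_host}/" | head -1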
# Expose with an internal ALB
oc expose dc kuard-amd64 --port 8080 --type=LoadBalancer --name=kuardilb --dry-run -o yaml | awk '1;/metadata:/{ print " annotations:\n service.beta.kubernetes.io/azure-load-balancer-internal: \"true\"" }' | oc create -f -
# Expose with an internal ALB in different subnet
ilb_subnet_name=apps
oc expose dc kuard-amd64 --port 8080 --type=LoadBalancer --name=kuard --dry-run -o yaml | awk '1;/metadata:/{ print " annotations:\n service.beta.kubernetes.io/azure-load-balancer-internal: \"true\"\n service.beta.kubernetes.io/azure-load-balancer-internal-subnet: \"'${ilb_subnet_name}'\"" }' | oc create -f -
# Exposing existing ClusterIP Svc over a route
oc expose svc kuardilb
# Expose with a public ALB
oc expose deploy kuard-amd64 --port 80 --type=LoadBalancer --name=kuardplb
# Sample pod running in privileged mode
yaml_file=/tmp/privileged.yml
cat <<EOF > $yaml_file
apiVersion: v1
kind: Pod
metadata:
  name: nginx-privileged
spec:
  containers:
  - name: nginx-privileged
    image: mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine
    securityContext:
      privileged: true
EOF
kubectl apply -f $yaml_file
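# Note (hedged): depending on the project's security context constraints the privileged pod
# may be rejected; granting the privileged SCC to the project's default service account is
# one possible workaround:
# oc adm policy add-scc-to-user privileged -z default -n $project_name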
############################
# Machine Sets and CA #
############################
# Get initial configuration
oc get node
oc get machine -n openshift-machine-api
oc get machinesets -n openshift-machine-api
oc get clusterautoscaler -n openshift-machine-api
# Deploy CA
# https://docs.openshift.com/container-platform/4.7/machine_management/applying-autoscaling.html
cat <<EOF | kubectl apply -f -
apiVersion: "autoscaling.openshift.io/v1"
kind: "ClusterAutoscaler"
metadata:
name: "default"
namespace: "openshift-machine-api"
spec:
podPriorityThreshold: -10
resourceLimits:
maxNodesTotal: 12
scaleDown:
enabled: true
delayAfterAdd: 10s
delayAfterDelete: 10s
delayAfterFailure: 10s
EOF
oc get clusterautoscaler -n openshift-machine-api
oc describe clusterautoscaler -n openshift-machine-api
# Deploy machine autoscalers for each machineset
machineset_list=$(oc get machinesets -n openshift-machine-api -o json | jq -r '.items[].metadata.name')
echo "$machineset_list" | while read machineset_name
do
cat <<EOF | kubectl apply -f -
apiVersion: autoscaling.openshift.io/v1beta1
kind: MachineAutoscaler
metadata:
  name: autoscale-$machineset_name
  namespace: openshift-machine-api
spec:
  minReplicas: 1
  maxReplicas: 3
  scaleTargetRef:
    apiVersion: machine.openshift.io/v1beta1
    kind: MachineSet
    name: $machineset_name
EOF
done
oc get machineautoscaler -n openshift-machine-api
# Create sample deployment with 1CPU resource requests, and scale it up/down
project_name=autoscalerdemo
oc new-project $project_name
cat <<EOF | kubectl apply -n $project_name -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: autoscaler-demo
  labels:
    app: autoscaler-demo
spec:
  replicas: 3
  selector:
    matchLabels:
      app: autoscaler-demo
  template:
    metadata:
      labels:
        app: autoscaler-demo
    spec:
      containers:
      - name: kuard
        image: gcr.io/kuar-demo/kuard-amd64:1
        resources:
          requests:
            memory: "64Mi"
            cpu: "1000m"
          limits:
            memory: "128Mi"
            cpu: "2000m"
        ports:
        - containerPort: 8080
EOF
oc get deploy -n $project_name
kubectl get pod -n $project_name
kubectl scale --replicas=3 -n $project_name deploy/autoscaler-demo
oc get machinesets -n openshift-machine-api
oc get machines -n openshift-machine-api
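# Optional (sketch): scale the demo deployment beyond current capacity to force the cluster
# autoscaler to add machines, then watch machines and nodes. The autoscaler deployment name
# below assumes the default naming for a ClusterAutoscaler called "default".
kubectl scale --replicas=20 -n $project_name deploy/autoscaler-demo
oc get machines -n openshift-machine-api -w
# kubectl -n openshift-machine-api logs deploy/cluster-autoscaler-default --tail=20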
# Deploy an additional machineset. Note: the infrastructure ID (aro-lfhz9), node resource group (aro-15847) and machineset name below are cluster-specific and must be adapted to your cluster
cat <<EOF | kubectl apply -n openshift-machine-api -f -
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
metadata:
  annotations:
    machine.openshift.io/GPU: '0'
    machine.openshift.io/memoryMb: '16384'
    machine.openshift.io/vCPU: '4'
  resourceVersion: '33181'
  name: aro-jose-worker-northeurope1
  generation: 1
  namespace: openshift-machine-api
  labels:
    machine.openshift.io/cluster-api-cluster: aro-lfhz9
    machine.openshift.io/cluster-api-machine-role: worker
    machine.openshift.io/cluster-api-machine-type: worker
spec:
  replicas: 1
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-cluster: aro-lfhz9
      machine.openshift.io/cluster-api-machineset: aro-jose-worker-northeurope1
  template:
    metadata:
      labels:
        machine.openshift.io/cluster-api-cluster: aro-lfhz9
        machine.openshift.io/cluster-api-machine-role: worker
        machine.openshift.io/cluster-api-machine-type: worker
        machine.openshift.io/cluster-api-machineset: aro-jose-worker-northeurope1
    spec:
      metadata: {}
      providerSpec:
        value:
          osDisk:
            diskSizeGB: 128
            managedDisk:
              storageAccountType: Premium_LRS
            osType: Linux
          networkResourceGroup: aro
          publicLoadBalancer: aro-lfhz9
          userDataSecret:
            name: worker-user-data
          vnet: arovnet
          credentialsSecret:
            name: azure-cloud-credentials
            namespace: openshift-machine-api
          zone: '1'
          metadata:
            creationTimestamp: null
          publicIP: false
          resourceGroup: aro-15847
          kind: AzureMachineProviderSpec
          location: northeurope
          vmSize: Standard_D4s_v3
          image:
            offer: aro4
            publisher: azureopenshift
            sku: aro_48
            version: 48.84.20210630
          subnet: workers
          apiVersion: azureproviderconfig.openshift.io/v1beta1
EOF
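# Verify the new machineset and, optionally, scale it. The machineset name is the
# cluster-specific one used in the manifest above.
oc get machineset -n openshift-machine-api
# oc scale machineset aro-jose-worker-northeurope1 --replicas=2 -n openshift-machine-api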
#########################
# Private Link Services #
#########################
node_rg_id=$(az aro show -n $cluster_name -g $rg --query 'clusterProfile.resourceGroupId' -o tsv)
node_rg_name=$(echo $node_rg_id | cut -d/ -f 5) && echo $node_rg_name
if [[ "$ingress_visibility" == "Public" ]]
then
alb_name=$(az network lb list -g $node_rg_name -o tsv --query "[?contains(name,'internal')].name")
frontend_id=$(az network lb frontend-ip list --lb-name $alb_name -g $node_rg_name -o tsv --query '[0].id') && echo $frontend_id
fi
az network vnet subnet create --vnet-name $vnet_name -g $rg -n $pls_subnet_name --address-prefix $pls_subnet_prefix
az network vnet subnet update --vnet-name $vnet_name -g $rg -n $pls_subnet_name --disable-private-link-service-network-policies true
# tenant_id=$(az account show --query tenantId -o tsv) && echo $tenant_id
# az login --service-principal -u $sp_app_id -p $sp_app_secret --tenant $tenant_id # Otherwise no access to the ALB in the node RG
az network private-link-service create -g $rg -n AROTestPLS --vnet-name $vnet_name --subnet $pls_subnet_name --lb-frontend-ip-configs $frontend_id -l $location
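# Sketch (not in the original flow): consume the Private Link Service from a private endpoint
# in a consumer VNet. The consumer vnet/subnet names below are hypothetical placeholders.
# pls_id=$(az network private-link-service show -g $rg -n AROTestPLS --query id -o tsv)
# az network private-endpoint create -g $rg -n AROTestPE -l $location \
#     --vnet-name <consumer_vnet> --subnet <consumer_subnet> \
#     --private-connection-resource-id $pls_id --connection-name AROTestPEConnection \
#     --manual-request false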
################
# Stop/Start #
################
# Start/stop ARO VMs
# Usage:
# - aro_vm start
# - aro_vm stop
function aro_vm() {
# Get node RG before logging in as SP
if [[ -z "$node_rg_name" ]]
then
echo "Getting node RG name..."
node_rg_id=$(az aro show -n $cluster_name -g $rg --query 'clusterProfile.resourceGroupId' -o tsv)
node_rg_name=$(echo $node_rg_id | cut -d/ -f 5)
else
echo "Using node RG name $node_rg_name..."
fi
if [[ -n "$node_rg_name" ]]
then
echo "Node RG $node_rg_name"
# Log in with the ARO SP to have privileges on the resources in the node RG
echo "Getting logged user type..."
user_type=$(az account show --query 'user.type' -o tsv)
if [[ "$user_type" == "user" ]]
then
echo "Existing user logged not as Service Principal. Logging in as Service Principal $sp_app_id..."
tenant_id=$(az account show --query tenantId -o tsv) && echo "Tenant ID is $tenant_id"
az login --service-principal -u $sp_app_id -p $sp_app_secret --tenant $tenant_id -o none
fi
# Get list of VMs in the node RG
echo "Getting VM names in resource group $node_rg_name..."
vm_list=$(az vm list -g $node_rg_name --query '[].name' -o tsv | sort -u)
echo "$(echo $vm_list | wc -l) VMs found"
while IFS= read -r vm
do
if [[ "$1" == "stop" ]]
then
echo "Deallocating virtual machine $vm..."
az vm deallocate -n $vm -g $node_rg_name -o none
else
echo "Starting virtual machine $vm..."
az vm start -n $vm -g $node_rg_name -o none
fi
done <<< "$vm_list"
# Log in as standard user again
az login
else
echo "ERROR: Node RG name could not be found"
fi
}
# aro_vm stop
# aro_vm start
###########################
# Container Insights #
###########################
# Create LA workspace
logws_name=$(az monitor log-analytics workspace list -g $rg --query '[0].name' -o tsv)
if [[ -z "$logws_name" ]]
then
logws_name=log$RANDOM
echo "INFO: Creating log analytics workspace ${logws_name}..."
az monitor log-analytics workspace create -n $logws_name -g $rg -o none
else
echo "INFO: Log Analytics workspace $logws_name found in resource group $rg"
fi
logws_id=$(az resource list -g $rg -n $logws_name --query '[].id' -o tsv)
logws_customerid=$(az monitor log-analytics workspace show -n $logws_name -g $rg --query customerId -o tsv)
if [[ "$azmonitor_usearc" = "yes" ]]
then
# Enable Arc
echo "INFO: Enabling Arc in ARO cluster..."
az connectedk8s connect --name $arc_name -g $rg -l $location -o none
az connectedk8s list -g $rg -o table
kubectl -n azure-arc get deployments,pods
# Enable AzMonitor extension
echo "INFO: Enabling Arc extension for Container Insights in ARO cluster..."
az k8s-extension create --name azuremonitor-containers --cluster-name $arc_name --resource-group $rg --cluster-type connectedClusters \
--extension-type Microsoft.AzureMonitor.Containers --configuration-settings "logAnalyticsWorkspaceResourceID=${logws_id}" -o none
# Other possible config settings for the extension (see https://docs.microsoft.com/azure/azure-monitor/containers/container-insights-enable-arc-enabled-clusters):
# omsagent.resources.daemonset.limits.cpu=150m
# omsagent.resources.daemonset.limits.memory=600Mi
# omsagent.resources.deployment.limits.cpu=1
# omsagent.resources.deployment.limits.memory=750Mi
# Deploy defender extension to the same WS
# Looking into the nodes, it looks like kube-apiserver/audit.log contains more info than openshift-apiserver/audit.log...
echo "INFO: Enabling Arc extension for Azure Defender in ARO cluster..."
az k8s-extension create --name microsoft.azuredefender.kubernetes --cluster-type connectedClusters \
--cluster-name $arc_name --resource-group $rg --extension-type microsoft.azuredefender.kubernetes \
--configuration-settings "logAnalyticsWorkspaceResourceID=${logws_id}" "auditLogPath=/var/log/kube-apiserver/audit.log" -o none
# Verify extensions
az k8s-extension list -c $arc_name -g $rg --cluster-type ConnectedClusters -o table
# Getting some logs
query='ContainerLog
| where TimeGenerated > ago(5m)
| project TimeGenerated, LogEntry, ContainerID
| take 20'
az monitor log-analytics query -w $logws_customerid --analytics-query $query -o tsv
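# Another sample query (sketch): pod inventory collected by Container Insights
query='KubePodInventory
| where TimeGenerated > ago(15m)
| summarize count() by Namespace, PodStatus'
az monitor log-analytics query -w $logws_customerid --analytics-query $query -o table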
else
# NOTE: this will probably be migrated to Arc
aro_id=$(az aro show -n $cluster_name -g $rg --query id -o tsv) && echo $aro_id
curl -o /tmp/enable-monitoring.sh -L https://aka.ms/enable-monitoring-bash-script
bash /tmp/enable-monitoring.sh --resource-id $aro_id --workspace-id $logws_id
fi
#####################
# Azure Files #
#####################
# Variables
sc_name=myazfiles
pvc_name=myshare
app_name=azfilespod
# Create SC
cat <<EOF | kubectl apply -f -
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: $sc_name
provisioner: kubernetes.io/azure-file
mountOptions:
  - dir_mode=0777
  - file_mode=0777
  - uid=0
  - gid=0
  - mfsymlinks
  - cache=strict
  - actimeo=30
parameters:
  skuName: Standard_LRS
  location: $location
EOF
# To prevent error: User "system:serviceaccount:kube-system:persistent-volume-binder" cannot create resource "secrets" in API group "" in the namespace "default"
# See https://bugzilla.redhat.com/show_bug.cgi?id=1575933
cat <<EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: system:controller:persistent-volume-binder
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: system:controller:persistent-volume-binder
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder
  namespace: kube-system
EOF
oc policy add-role-to-user admin system:serviceaccount:kube-system:persistent-volume-binder -n default
# Create PVC
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: $pvc_name
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: $sc_name
  resources:
    requests:
      storage: 1Gi
EOF
# Create pod
cat <<EOF | kubectl apply -f -
kind: Pod
apiVersion: v1
metadata:
  name: $app_name
  labels:
    app: $app_name
spec:
  containers:
  - name: $app_name
    image: erjosito/sqlapi:1.0
    resources:
      requests:
        cpu: 1000m
        memory: 1024Mi
      limits:
        cpu: 1000m
        memory: 1024Mi
    ports:
    - containerPort: 8080
    volumeMounts:
    - mountPath: "/mnt/azure"
      name: volume
  volumes:
  - name: volume
    persistentVolumeClaim:
      claimName: $pvc_name
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: $app_name
  name: $app_name
spec:
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: $app_name
  type: LoadBalancer
EOF
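# Quick read/write test on the mounted Azure Files share (sketch): wait for the pod and
# write/read a file under /mnt/azure
kubectl wait --for=condition=ready pod/$app_name --timeout=300s
kubectl exec $app_name -- bash -c 'echo hello > /mnt/azure/test.txt && cat /mnt/azure/test.txt'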
# Diagnostics
secret_name=$(k get pv -o json | jq -r '.items[0].spec.azureFile.secretName') && echo $secret_name
storage_account_name=$(echo $secret_name | cut -d- -f 4) && echo $storage_account_name
share_name=$(k get pv -o json | jq -r '.items[0].spec.azureFile.shareName') && echo $share_name
az storage account show -g $cluster_rg -n $storage_account_name -o table
storage_account_key=$(az storage account keys list -n $storage_account_name -g $cluster_rg --query '[0].value' -o tsv) # Doesn't work, no privileges on the node RG
storage_account_name=$(k get secret $secret_name -o json | jq -r '.data.azurestorageaccountname' | base64 -d) && echo $storage_account_name
storage_account_key=$(k get secret $secret_name -o json | jq -r '.data.azurestorageaccountkey' | base64 -d)
az storage share list --account-name $storage_account_name --account-key $storage_account_key -o table
k get sc
k get pvc
k describe pvc/$pvc_name
k get pv
k describe pv
k describe pod/$app_name
k exec $app_name -- bash -c 'ls -ald /mnt/*'
k exec $app_name -- bash -c 'apt install -y fio'
k exec $app_name -- bash -c 'cd /mnt/azure/ && fio --name=128krandomreads --rw=randread --direct=1 --ioengine=libaio --bs=128k --numjobs=4 --iodepth=128 --size=128M --runtime=600 --group_reporting'
k exec $app_name -- bash -c 'cd /mnt/azure/ && fio --name=256krandomreads --rw=randread --direct=1 --ioengine=libaio --bs=256k --numjobs=4 --iodepth=128 --size=128M --runtime=600 --group_reporting'
k exec -it $app_name -- bash
######################
# Pod identity #
######################
# Option 1: using managed identity
# ================================
# Create managed identity
id_name=podid
id_id=$(az identity show -n $id_name -g $rg --query id -o tsv 2>/dev/null)
if [[ -z "$id_id" ]]
then
echo "Creating identity ${id_name}..."
az identity create -n $id_name -g $rg
id_id=$(az identity show -n $id_name -g $rg --query id -o tsv 2>/dev/null)
fi
id_principal_id=$(az identity show -n $id_name -g $rg --query principalId -o tsv)
id_client_id="$(az identity show -g $rg -n $id_name --query clientId -o tsv)"
# Add permissions for Managed Identity (NOT WORKING)
# az login --service-principal -u $sp_app_id -p $sp_app_secret --tenant $tenant_id # Tried both with sub owner and with ARO SP
az role assignment create --role "Managed Identity Operator" --assignee "${id_principal_id}" --scope "/subscriptions/${subscription_id}/resourcegroups/${cluster_rg}"
az role assignment create --role "Virtual Machine Contributor" --assignee "${id_principal_id}" --scope "/subscriptions/${subscription_id}/resourcegroups/${cluster_rg}"
az role assignment create --role "Managed Identity Operator" --assignee "${id_principal_id}" --scope "/subscriptions/${subscription_id}/resourcegroups/${rg}"
# Add helm chart, get some variables
helm repo add aad-pod-identity https://raw.githubusercontent.com/Azure/aad-pod-identity/master/charts
subscription_id=$(az account show --query id -o tsv)
tenant_id=$(az account show --query tenantId -o tsv)
# Deploy helm chart for managed identity
# See https://azure.github.io/aad-pod-identity/docs/configure/deploy_in_openshift/
helm delete aad-pod-identity # If there was a previous instance of the helm chart
helm install aad-pod-identity aad-pod-identity/aad-pod-identity \
--set adminsecret.cloud=AzurePublicCloud \
--set "adminsecret.subscriptionID=$subscription_id" \
--set "adminsecret.resourceGroup=$cluster_rg" \
--set adminsecret.vmType=vmss \
--set "adminsecret.tenantID=$tenant_id" \
--set adminsecret.clientID=msi \
--set adminsecret.clientSecret=msi \
--set-string adminsecret.useMSI=true \
--set "adminsecret.userAssignedMSIClientID=$id_client_id"
# Option 2: using SP (the same as for cluster creation)
# =====================================================
# Add permissions for SP (NOT WORKING)
# az login --service-principal -u $sp_app_id -p $sp_app_secret --tenant $tenant_id # Tried both with sub owner and with ARO SP
sp_object_id=$(az ad sp show --id $sp_app_id --query objectId -o tsv) && echo $sp_object_id
az role assignment create --role "Managed Identity Operator" --assignee "${sp_object_id}" --scope "/subscriptions/${subscription_id}/resourcegroups/${cluster_rg}"
az role assignment create --role "Virtual Machine Contributor" --assignee "${sp_object_id}" --scope "/subscriptions/${subscription_id}/resourcegroups/${cluster_rg}"
az role assignment create --role "Managed Identity Operator" --assignee "${sp_object_id}" --scope "/subscriptions/${subscription_id}/resourcegroups/${rg}"
# Add helm chart, get some variables
helm repo add aad-pod-identity https://raw.githubusercontent.com/Azure/aad-pod-identity/master/charts
subscription_id=$(az account show --query id -o tsv)
tenant_id=$(az account show --query tenantId -o tsv)
# Deploy helm chart for SP
# See https://azure.github.io/aad-pod-identity/docs/configure/deploy_in_openshift/
helm delete aad-pod-identity # If there was a previous instance of the helm chart
helm install aad-pod-identity aad-pod-identity/aad-pod-identity \
--set adminsecret.cloud=AzurePublicCloud \
--set "adminsecret.subscriptionID=$subscription_id" \
--set "adminsecret.resourceGroup=$cluster_rg" \
--set adminsecret.vmType=vmss \
--set "adminsecret.tenantID=$tenant_id" \
--set "adminsecret.clientID=$sp_app_id" \
--set "adminsecret.clientSecret=$sp_app_secret" \
--set-string adminsecret.useMSI=false \
--set "adminsecret.userAssignedMSIClientID=$id_client_id"
# Test
# ====
# Create sample identity
sampleid_name=podidsample
sampleid_id=$(az identity show -n $sampleid_name -g $rg --query id -o tsv 2>/dev/null)
if [[ -z "$sampleid_id" ]]
then
echo "Creating identity ${sampleid_name}..."
az identity create -n $sampleid_name -g $rg
sampleid_id=$(az identity show -n $sampleid_name -g $rg --query id -o tsv 2>/dev/null)
fi
sampleid_principal_id=$(az identity show -n $sampleid_name -g $rg --query principalId -o tsv)
sampleid_client_id="$(az identity show -g $rg -n $sampleid_name --query clientId -o tsv)"
az keyvault set-policy -n "$keyvault_name" --object-id "$sampleid_principal_id" --secret-permissions get list
# Create AzureIdentity resource
cat <<EOF | kubectl apply -f -
apiVersion: "aadpodidentity.k8s.io/v1"
kind: AzureIdentity
metadata:
name: ${sampleid_name}
spec:
type: 0
resourceID: ${sampleid_id}
clientID: ${sampleid_client_id}
EOF
# Create AzureIdentityBinding resource
cat <<EOF | kubectl apply -f -
apiVersion: "aadpodidentity.k8s.io/v1"
kind: AzureIdentityBinding
metadata:
name: ${sampleid_name}-binding
spec:
azureIdentity: ${sampleid_name}
selector: ${sampleid_name}
EOF
# Deploy pod
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: demo
  labels:
    aadpodidbinding: $sampleid_name
spec:
  containers:
  - name: demo
    image: mcr.microsoft.com/oss/azure/aad-pod-identity/demo:v1.8.0
    args:
      - --subscription-id=${subscription_id}
      - --resource-group=${rg}
      - --identity-client-id=${sampleid_client_id}
  nodeSelector:
    kubernetes.io/os: linux
EOF
# Alternatively, run another simple demo pod like this
# kubectl run azure-cli -it --image=mcr.microsoft.com/azure-cli --labels=aadpodidbinding=$sampleid_name /bin/bash
# within the azure-cli shell
# az login --identity --allow-no-subscriptions --debug
# Troubleshoot
kubectl --namespace=default get pods -l "app.kubernetes.io/component=mic" -o wide
kubectl --namespace=default get pods -l "app.kubernetes.io/component=nmi" -o wide
kubectl --namespace=default get pods -l "aadpodidbinding=$sampleid_name" -o wide
kubectl --namespace=default describe pods -l "aadpodidbinding=$sampleid_name"
kubectl logs demo
##################################
# Deploy second router #
##################################
domain=$(az aro show -n $cluster_name -g $rg --query 'clusterProfile.domain' -o tsv) && echo $domain
yaml_file=/tmp/router.yml
cat <<EOF > $yaml_file
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  namespace: openshift-ingress-operator
  name: internal
spec:
  domain: intapps.${domain}.${location}.aroapp.io
  endpointPublishingStrategy:
    type: LoadBalancerService
    loadBalancer:
      scope: Internal
  namespaceSelector:
    matchLabels:
      type: internal
EOF
oc apply -f $yaml_file
# oc delete -f $yaml_file
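# Verification (sketch): the new ingress controller only serves namespaces labeled
# type=internal (see the namespaceSelector above), so label a project and check the internal
# router's LoadBalancer service in openshift-ingress
# oc label namespace $project_name type=internal
oc get ingresscontroller -n openshift-ingress-operator
oc get svc -n openshift-ingress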
#####################
# VM #
#####################
# You might need a VM in the vnet for some purposes:
# - Jump host in a private cluster
# - Custom DNS server/forwarder
# - Test connectivity
if [[ "$api_visibility" == "Private" ]]
then
# Deploy Ubuntu 18.04 VM and get its public IP address
vm_name=apivm
vm_nsg_name=${vm_name}-nsg
vm_pip_name=${vm_name}-pip
vm_disk_name=${vm_name}-disk0
vm_sku=Standard_B2ms
publisher=Canonical
offer=UbuntuServer
sku=18.04-LTS
image_urn=$(az vm image list -p $publisher -f $offer -s $sku -l $location --query '[0].urn' -o tsv)
az network vnet subnet create -n $vm_subnet_name --vnet-name $vnet_name -g $rg --address-prefixes $vm_subnet_prefix
# az vm create -n testvm -g $rg --image ubuntuLTS --generate-ssh-keys --public-ip-address testvm-pip --vnet-name $vnet_name --subnet $vm_subnet_name
az vm create -n $vm_name -g $rg -l $location --image $image_urn --size $vm_sku --generate-ssh-keys \
--os-disk-name $vm_disk_name --os-disk-size-gb 32 \
--vnet-name $vnet_name --subnet $vm_subnet_name \
--nsg $vm_nsg_name --nsg-rule SSH --public-ip-address $vm_pip_name
vm_pip_ip=$(az network public-ip show -n $vm_pip_name -g $rg --query ipAddress -o tsv)
# Test access to VM
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "ip a"
# Install Azure CLI
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash"
# Install kubectl
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "sudo apt-get update && sudo apt-get install -y apt-transport-https"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" 'echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list'
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "sudo apt-get update"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "sudo apt-get install -y kubectl"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "kubectl version"
# Download oc
oc_url="https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz"
oc_file="openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz"
oc_dir="openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "wget $oc_url"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "tar xvf $oc_file"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "ls ./$oc_dir"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc login $aro_api_url -u $aro_usr -p $aro_pwd"
# Cluster-info
aro_usr=$(az aro list-credentials -n $cluster_name -g $rg --query kubeadminUsername -o tsv) && echo "$aro_usr"
aro_pwd=$(az aro list-credentials -n $cluster_name -g $rg --query kubeadminPassword -o tsv)
aro_api_url=$(az aro show -n $cluster_name -g $rg --query 'apiserverProfile.url' -o tsv) && echo "$aro_api_url"
aro_api_ip=$(az aro show -n $cluster_name -g $rg --query 'apiserverProfile.ip' -o tsv) && echo "$aro_api_ip"
router_ip=$(az aro show -n $cluster_name -g $rg --query 'ingressProfiles[0].ip' -o tsv) && echo "$router_ip"
aro_api_fqdn=$(echo "$aro_api_url" | cut -d/ -f 3 | cut -d: -f 1)   # /etc/hosts needs the API FQDN, not the full URL
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "sudo sed -i \"\$ a $aro_api_ip $aro_api_fqdn\" /etc/hosts"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc login $aro_api_url -u $aro_usr -p $aro_pwd --insecure-skip-tls-verify"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/kubectl cluster-info"
domain=$(az aro show -n $cluster_name -g $rg --query 'clusterProfile.domain' -o tsv)
aro_api_fqdn=api.${domain}.${location}.aroapp.io
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "nslookup $aro_api_fqdn"
# Router info
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc get svc --all-namespaces | grep LoadBalancer"
# Install a DNS server and configure a sample IP in the hosts file, to test resolution from the ARO cluster
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "sudo apt update && sudo apt -y install dnsmasq"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "sudo sed -i \"\$ a 1.2.3.4 myserver.onprem.contoso.com\" /etc/hosts"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "cat /etc/hosts"
# Configure private DNS zones for the apps and the API
# The API is already registered by the cluster, no need to do it again
# az network private-dns zone create -n "$domain" -g "$rg"
# az network private-dns record-set a add-record --record-set-name api -z $domain -g $rg -a $aro_api_ip
# az network private-dns link vnet create -g $rg -z $domain -n arodomain --virtual-network $vnet_name --registration-enabled false
az network private-dns zone create -n "apps.${domain}" -g "$rg"
az network private-dns record-set a add-record --record-set-name '*' -z "apps.${domain}" -g $rg -a $router_ip
az network private-dns link vnet create -g $rg -z "apps.${domain}" -n arorouter --virtual-network $vnet_name --registration-enabled false
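# Optional check (sketch): verify that the wildcard record resolves from inside the vnet
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "nslookup test.apps.${domain}"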
fi
###################################
# Deploy sample app (private API) #
###################################
if [[ "$api_visibility" == "Private" ]]
then
# Example: kuard
project_name=kuard
image=gcr.io/kuar-demo/kuard-amd64:1
# Example: whoami api
project_name=whoami
image=erjosito/sqlapi:1.0
# Go
app_name="$project_name"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc new-project $project_name"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc new-app --docker-image $image --name $app_name"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc get dc"
# Exposing over ILB
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc expose dc $app_name --port 8080 --type=LoadBalancer --name=$app_name --dry-run -o yaml | awk '1;/metadata:/{ print \"  annotations:\\n    service.beta.kubernetes.io/azure-load-balancer-internal: \\\"true\\\"\" }' | ./$oc_dir/oc create -f -"
# Exposing over clusterip Svc should not be required
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc expose dc $app_name --port 8080 --type=ClusterIP --name=$app_name"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc get svc"
# Exposing ClusterIP Svc over a route
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc expose svc $app_name"
app_url=$(ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc get route $app_name -o json" | jq -r '.spec.host')
# Test reachability to API
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "curl -s http://${app_url}/api/ip"
# Configure DNS operator
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/oc version"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/kubectl get dns.operator/default -o yaml"
vm_private_ip=$(az vm list-ip-addresses -n $vm_name -g $rg --query '[0].virtualMachine.network.privateIpAddresses[0]' -o tsv) && echo $vm_private_ip
# Does not work!
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/kubectl get dns.operator/default -o yaml | sed \
's/spec: {}/spec:\\n servers:\\n - name: contoso\\n zones:\\n - onprem.contoso.com\\n forwardPlugin:\\n upstreams:\\n - $vm_private_ip/g' | ./$oc_dir/kubectl replace -f -"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "./$oc_dir/kubectl get dns.operator/default -o yaml"
# Test DNS resolution from cluster
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "curl -s http://${app_url}/api/dns?fqdn=google.com"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "ping myserver.onprem.contoso.com -c 1"
ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no "$vm_pip_ip" "curl -s http://${app_url}/api/dns?fqdn=myserver.onprem.contoso.com"
fi
#################
# Trident #
#################
# Get last version of trident
trident_lastversion_json=$(curl --silent "https://api.github.com/repos/NetApp/trident/releases/latest" | jq 'with_entries(select([.key] | inside(["html_url", "assets", "name"])))')
trident_lastversion=$(echo "$trident_lastversion_json" | jq -r '.name')
trident_lastversion_url=$(echo "$trident_lastversion_json" | jq -r '.html_url')
trident_lastversion_file=$(echo "$trident_lastversion_json" | jq -r '.assets[0].name')
trident_lastversion_file_url=$(echo "$trident_lastversion_json" | jq -r '.assets[0].browser_download_url')
# See whether we need to install/upgrade tridentctl
install_trident=no
tridentctl_binary=$(which tridentctl)
if [[ "$tridentctl_binary" == "tridentctl not found" ]]
then
echo "tridentctl not found"
install_trident=yes
else
trident_version="v$(tridentctl version --client -o json | jq -r '.client.version')"
if [[ "$trident_version" == "$trident_lastversion" ]]
then
echo "tridentctl version is already the latest available version ($trident_version)"
else
echo "tridentctl needs to be upgraded from version $trident_version to $trident_lastversion"
install_trident=yes
fi
fi
# Install
if [[ "$install_trident" == "yes" ]]