provision-vms.sh
#!/bin/bash
#
# Provision libvirt VMs that stand in for baremetal masters and workers in the
# kni-upi-lab cluster, expose each one over IPMI via virtualbmc, and record the
# resulting host data in the cluster configuration files.

source "common.sh"
source "$PROJECT_DIR/tools/vbmc-funcs.sh"

CLUSTER_NAME="kni-upi-lab"

# Replica counts come straight from the install config
NUM_MASTERS=$(yq -r '.controlPlane.replicas' "$PROJECT_DIR/cluster/install-config.yaml")
NUM_WORKERS=$(yq -r '.compute[0].replicas' "$PROJECT_DIR/cluster/install-config.yaml")

# MAC prefixes for the provisioning and baremetal NICs
MASTER_PROV_MAC_PREFIX="52:54:00:82:68:4"
MASTER_BM_MAC_PREFIX="52:54:00:82:69:4"
WORKER_PROV_MAC_PREFIX="52:54:00:82:68:5"
WORKER_BM_MAC_PREFIX="52:54:00:82:69:5"

# Base values for the per-node vBMC ports
MASTER_VBMC_PORT_START=624
WORKER_VBMC_PORT_START=625

LIBVIRT_STORAGE_POOL="default"
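
# The node index i is appended to both the MAC prefixes and the port bases
# above, so e.g. master-0 gets provisioning MAC 52:54:00:82:68:40, baremetal
# MAC 52:54:00:82:69:40 and vBMC port 6240, while worker-1 gets
# 52:54:00:82:68:51, 52:54:00:82:69:51 and port 6251.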

# Tear down any VMs left over from a previous run before provisioning new ones
(
    "$PROJECT_DIR/tools/clean-vms.sh"
) || exit 1

for i in $(seq 0 $((NUM_MASTERS - 1))); do
    name="$CLUSTER_NAME-master-$i"

    # Create the master VM with one NIC on the provisioning bridge and one on
    # the baremetal bridge, booting from disk first and falling back to PXE
    sudo virt-install --ram 16384 --vcpus 4 --os-variant rhel7 --cpu host-passthrough \
        --disk size=40,pool=$LIBVIRT_STORAGE_POOL,device=disk,bus=virtio,format=qcow2 \
        --import --noautoconsole --vnc \
        --network=bridge:provisioning,mac="$MASTER_PROV_MAC_PREFIX$i" \
        --network=bridge:baremetal,mac="$MASTER_BM_MAC_PREFIX$i" \
        --name "$name" --os-type=linux \
        --events on_reboot=destroy,on_lockfailure=poweroff \
        --boot hd,network

    # Wait up to 10 seconds for the VM to show up as running
    vm_ready=false
    for k in {1..10}; do
        if [[ -n "$(virsh list | grep "$name" | grep running)" ]]; then
            vm_ready=true
            break
        else
            echo "wait $k"
            sleep 1
        fi
    done

    if [ "$vm_ready" = true ]; then
        # Register a vBMC endpoint for the VM, then prove it answers IPMI by
        # powering the VM off; restart the vBMC and retry up to two times
        create_vbmc "$name" "$MASTER_VBMC_PORT_START$i"
        sleep 2
        ipmi_output=$(ipmitool -I lanplus -U ADMIN -P ADMIN -H 127.0.0.1 -p "$MASTER_VBMC_PORT_START$i" power off)
        RETRIES=0
        while [[ "$ipmi_output" != "Chassis Power Control: Down/Off" ]]; do
            if [[ $RETRIES -ge 2 ]]; then
                echo "FAIL: Unable to start $name vBMC!"
                exit 1
            fi
            echo "IPMI failure detected -- trying to start $name vBMC again..."
            vbmc start "$name" > /dev/null 2>&1
            sleep 1
            ipmi_output=$(ipmitool -I lanplus -U ADMIN -P ADMIN -H 127.0.0.1 -p "$MASTER_VBMC_PORT_START$i" power off)
            RETRIES=$((RETRIES+1))
        done
        echo "$name vBMC started and IPMI command succeeded!"
    fi
done

for i in $(seq 0 $((NUM_WORKERS - 1))); do
    name="$CLUSTER_NAME-worker-$i"

    # Create the worker VM, mirroring the master layout above
    sudo virt-install --ram 16384 --vcpus 4 --os-variant rhel7 --cpu host-passthrough \
        --disk size=40,pool=$LIBVIRT_STORAGE_POOL,device=disk,bus=virtio,format=qcow2 \
        --import --noautoconsole --vnc \
        --network=bridge:provisioning,mac="$WORKER_PROV_MAC_PREFIX$i" \
        --network=bridge:baremetal,mac="$WORKER_BM_MAC_PREFIX$i" \
        --name "$name" --os-type=linux \
        --events on_reboot=destroy,on_lockfailure=poweroff \
        --boot hd,network

    # Wait up to 10 seconds for the VM to show up as running
    vm_ready=false
    for k in {1..10}; do
        if [[ -n "$(virsh list | grep "$name" | grep running)" ]]; then
            vm_ready=true
            break
        else
            echo "wait $k"
            sleep 1
        fi
    done

    if [ "$vm_ready" = true ]; then
        # Same vBMC registration and IPMI sanity check as for the masters
        create_vbmc "$name" "$WORKER_VBMC_PORT_START$i"
        sleep 2
        ipmi_output=$(ipmitool -I lanplus -U ADMIN -P ADMIN -H 127.0.0.1 -p "$WORKER_VBMC_PORT_START$i" power off)
        RETRIES=0
        while [[ "$ipmi_output" != "Chassis Power Control: Down/Off" ]]; do
            if [[ $RETRIES -ge 2 ]]; then
                echo "FAIL: Unable to start $name vBMC!"
                exit 1
            fi
            echo "IPMI failure detected -- trying to start $name vBMC again..."
            vbmc start "$name" > /dev/null 2>&1
            sleep 1
            ipmi_output=$(ipmitool -I lanplus -U ADMIN -P ADMIN -H 127.0.0.1 -p "$WORKER_VBMC_PORT_START$i" power off)
            RETRIES=$((RETRIES+1))
        done
        echo "$name vBMC started and IPMI command succeeded!"
    fi
done
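
# If a node misbehaves, its vBMC endpoint can also be checked by hand, e.g. for
# master-0 (port 6240):
#   vbmc list
#   ipmitool -I lanplus -U ADMIN -P ADMIN -H 127.0.0.1 -p 6240 power status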

#
# Update cluster/install-config.yaml
#
# Build one JSON host entry per VM and inject the resulting list into
# .platform.hosts of the install config.
PLATFORM_HOSTS=""
for i in $(seq 0 $((NUM_MASTERS - 1))); do
    PLATFORM_HOSTS="$PLATFORM_HOSTS{\"bmc\": {\"address\": \"ipmi://127.0.0.1:$MASTER_VBMC_PORT_START$i\", \"credentialsName\": \"ha-lab-ipmi\"}, \"bootMACAddress\": \"$MASTER_PROV_MAC_PREFIX$i\", \"hardwareProfile\": \"default\", \"name\": \"master-$i\", \"osProfile\": {\"install_dev\": \"vda\", \"pxe\": \"bios\"}, \"role\": \"master\", \"sdnMacAddress\": \"$MASTER_BM_MAC_PREFIX$i\"},"
done
for i in $(seq 0 $((NUM_WORKERS - 1))); do
    PLATFORM_HOSTS="$PLATFORM_HOSTS{\"bmc\": {\"address\": \"ipmi://127.0.0.1:$WORKER_VBMC_PORT_START$i\", \"credentialsName\": \"ha-lab-ipmi\"}, \"bootMACAddress\": \"$WORKER_PROV_MAC_PREFIX$i\", \"hardwareProfile\": \"default\", \"name\": \"worker-$i\", \"osProfile\": {\"install_dev\": \"vda\", \"pxe\": \"bios\"}, \"role\": \"worker\", \"sdnMacAddress\": \"$WORKER_BM_MAC_PREFIX$i\"},"
done

# Strip the trailing comma before wrapping the entries in a JSON array
PLATFORM_HOSTS=${PLATFORM_HOSTS%,}

TJQ=$(yq -y ".platform.hosts = [$PLATFORM_HOSTS]" < "$PROJECT_DIR/cluster/install-config.yaml")
[[ $? == 0 ]] && echo "${TJQ}" >| "$PROJECT_DIR/cluster/install-config.yaml"
echo "$PROJECT_DIR/cluster/install-config.yaml updated with virtualization data!"

#
# Update cluster/site-config.yaml
#
# Point the boot/SDN interfaces at the guest NIC names (ens3/ens4) and mark
# both node roles as virtual.
TJQ=$(yq -y '.provisioningInfrastructure.hosts.masterBootInterface="ens3"' < "$PROJECT_DIR/cluster/site-config.yaml")
[[ $? == 0 ]] && echo "${TJQ}" >| "$PROJECT_DIR/cluster/site-config.yaml"
TJQ=$(yq -y '.provisioningInfrastructure.hosts.masterSdnInterface="ens4"' < "$PROJECT_DIR/cluster/site-config.yaml")
[[ $? == 0 ]] && echo "${TJQ}" >| "$PROJECT_DIR/cluster/site-config.yaml"
TJQ=$(yq -y '.provisioningInfrastructure.hosts.workerBootInterface="ens3"' < "$PROJECT_DIR/cluster/site-config.yaml")
[[ $? == 0 ]] && echo "${TJQ}" >| "$PROJECT_DIR/cluster/site-config.yaml"
TJQ=$(yq -y '.provisioningInfrastructure.hosts.workerSdnInterface="ens4"' < "$PROJECT_DIR/cluster/site-config.yaml")
[[ $? == 0 ]] && echo "${TJQ}" >| "$PROJECT_DIR/cluster/site-config.yaml"
TJQ=$(yq -y ".provisioningInfrastructure.virtualMasters = true" < "$PROJECT_DIR/cluster/site-config.yaml")
[[ $? == 0 ]] && echo "${TJQ}" >| "$PROJECT_DIR/cluster/site-config.yaml"
TJQ=$(yq -y ".provisioningInfrastructure.virtualWorkers = true" < "$PROJECT_DIR/cluster/site-config.yaml")
[[ $? == 0 ]] && echo "${TJQ}" >| "$PROJECT_DIR/cluster/site-config.yaml"
echo "$PROJECT_DIR/cluster/site-config.yaml updated with virtualization data!"

#
# Start VM boot service
#
printf "\nCreating VM boot service...\n\n"

# Install a small systemd unit that runs vm-boot-helper.sh from the current
# directory, then reload systemd and start/enable the service.
pwd=$(pwd)
sudo tee "/etc/systemd/system/vmboot.service" > /dev/null << EOF
[Unit]
Description=VM boot helper

[Service]
ExecStart=/bin/bash $pwd/vm-boot-helper.sh

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl daemon-reload
sudo systemctl start vmboot
sudo systemctl enable vmboot
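
# The boot helper can be inspected later with the usual systemd tooling, e.g.:
#   sudo systemctl status vmboot
#   sudo journalctl -u vmboot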