diff --git a/scripts/openyurt-deployer/main.go b/scripts/openyurt-deployer/main.go
index 86e414443..61347a7d2 100644
--- a/scripts/openyurt-deployer/main.go
+++ b/scripts/openyurt-deployer/main.go
@@ -4,6 +4,7 @@ import (
     "encoding/json"
     "flag"
     "os"
+    "strings"
 
     "github.com/vhive-serverless/vHive/scripts/utils"
 
@@ -70,8 +71,8 @@ func main() {
     // delDemo(*deployerConf)
     // case "demo-print":
     // printDemo(*deployerConf)
-    // case "deploy-yurt":
-    // deployOpenYurt(*deployerConf)
+    case "deploy-yurt":
+        deployOpenYurt(*deployerConf)
     default:
         utils.InfoPrintf("Usage: %s [Parameters...]\n", os.Args[0])
         os.Exit(-1)
@@ -182,6 +183,49 @@ func deployNodes(deployerConfFile string) {
     // init demo environment
     masterNode.BuildDemo(workerNodes)
+}
+
+func deployOpenYurt(deployerConfFile string) {
+
+    nodesInfo, err := readAndUnMarshall(deployerConfFile)
+    utils.CheckErrorWithMsg(err, "Failed to read and unmarshal deployer configuration JSON")
+    nodeList := initializeNodes(nodesInfo)
+    masterNode := nodeList[0]
+    workerNodes := nodeList[1:]
+
+    // init yurt cluster
+    utils.SuccessPrintf("Starting to initialize the yurt cluster!\n")
+    masterNode.YurtMasterInit()
+
+    utils.WaitPrintf("Extracting master node information from logs")
+    output, err := masterNode.ExecShellCmd("sed -n '1p;2p;3p;4p' %s/masterNodeValues", masterNode.Configs.System.TmpDir)
+    utils.CheckErrorWithMsg(err, "Failed to extract master node information from logs!\n")
+    // Process the content and assign it to variables
+    lines := strings.Split(strings.TrimSpace(output), "\n")
+    if len(lines) != 4 {
+        utils.ErrorPrintf("Invalid masterNodeValues file format: expected 4 lines\n")
+        return
+    }
+
+    addr := lines[0]
+    port := lines[1]
+    token := lines[2]
+
+    for _, worker := range workerNodes {
+        worker.YurtWorkerJoin(addr, port, token)
+        utils.InfoPrintf("worker %s joined yurt cluster!\n", worker.Configs.System.NodeHostName)
+    }
+    utils.SuccessPrintf("All nodes have joined the yurt cluster, starting to expand\n")
+    for _, worker := range workerNodes {
+        masterNode.YurtMasterExpand(&worker)
+        utils.InfoPrintf("Master has expanded to worker: %s\n", worker.Configs.System.NodeHostName)
+    }
+    utils.SuccessPrintf("Master has expanded to all nodes!\n")
+
+    for _, node := range nodeList {
+        utils.InfoPrintf("node: %s\n", node.Name)
+        node.CleanUpTmpDir()
+    }
     utils.SuccessPrintf(">>>>>>>>>>>>>>>>OpenYurt Cluster Deployment Finished!<<<<<<<<<<<<<<<\n")
 }
diff --git a/scripts/openyurt-deployer/node/knative.go b/scripts/openyurt-deployer/node/knative.go
index 9323a530d..ccbfaa38a 100644
--- a/scripts/openyurt-deployer/node/knative.go
+++ b/scripts/openyurt-deployer/node/knative.go
@@ -12,7 +12,6 @@ func (node *Node) InstallKnativeServing() {
     var err error
 
     node.CreateTmpDir()
-    defer node.CleanUpTmpDir()
 
     // Install and configure MetalLB
     utils.WaitPrintf("Installing and configuring MetalLB")
diff --git a/scripts/openyurt-deployer/node/kube.go b/scripts/openyurt-deployer/node/kube.go
index 821d135ac..5c468802d 100644
--- a/scripts/openyurt-deployer/node/kube.go
+++ b/scripts/openyurt-deployer/node/kube.go
@@ -14,7 +14,6 @@ func (node *Node) KubeMasterInit() (string, string, string, string) {
     var err error
     node.check_kube_environment()
     node.CreateTmpDir()
-    defer node.CleanUpTmpDir()
 
     // Pre-pull Image
     utils.WaitPrintf("Pre-Pulling required images")
@@ -54,8 +53,10 @@
     // Extract master node information from logs
     utils.WaitPrintf("Extracting master node information from logs")
     shellOut, err := node.ExecShellCmd("sed -n '/.*kubeadm join.*/p' < %s/masterNodeInfo | sed -n 's/.*join \\(.*\\):\\(\\S*\\) --token \\(\\S*\\).*/\\1 \\2 \\3/p'", node.Configs.System.TmpDir)
+    utils.InfoPrintf("shellOut 2: %s\n", shellOut) //DEBUG
     utils.CheckErrorWithMsg(err, "Failed to extract master node information from logs!\n")
     splittedOut := strings.Split(shellOut, " ")
+    utils.InfoPrintf("splittedOut 3: %s\n", splittedOut) //DEBUG
     node.Configs.Kube.ApiserverAdvertiseAddress = splittedOut[0]
     node.Configs.Kube.ApiserverPort = splittedOut[1]
     node.Configs.Kube.ApiserverToken = splittedOut[2]
@@ -63,6 +64,10 @@ func (node *Node) KubeMasterInit() (string, string, string, string) {
     utils.CheckErrorWithTagAndMsg(err, "Failed to extract master node information from logs!\n")
     node.Configs.Kube.ApiserverTokenHash = shellOut
 
+    shellData := fmt.Sprintf("echo '%s\n%s\n%s\n%s' > %s/masterNodeValues", node.Configs.Kube.ApiserverAdvertiseAddress, node.Configs.Kube.ApiserverPort, node.Configs.Kube.ApiserverToken, node.Configs.Kube.ApiserverTokenHash, node.Configs.System.TmpDir)
+    _, err = node.ExecShellCmd(shellData)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to write master node information to file!\n")
+
     return node.Configs.Kube.ApiserverAdvertiseAddress,
         node.Configs.Kube.ApiserverPort,
         node.Configs.Kube.ApiserverToken,
diff --git a/scripts/openyurt-deployer/node/yurt.go b/scripts/openyurt-deployer/node/yurt.go
new file mode 100644
index 000000000..537ac0148
--- /dev/null
+++ b/scripts/openyurt-deployer/node/yurt.go
@@ -0,0 +1,249 @@
+// Author: Haoyuan Ma
+package node
+
+import (
+    "strings"
+    "time"
+
+    "github.com/vhive-serverless/vHive/scripts/utils"
+    "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/template"
+)
+
+func (node *Node) CheckYurtMasterEnvironment() {
+    node.OnlyExecByMaster()
+    // Check environment
+    var err error
+    utils.InfoPrintf("Checking system environment...\n")
+
+    // Check Helm
+    _, err = node.LookPath("helm")
+    if err != nil {
+        utils.WarnPrintf("Helm not found! Helm will be automatically installed!\n")
+    } else {
+        utils.SuccessPrintf("Helm found!\n")
+        node.Configs.Yurt.HelmInstalled = true
+    }
+
+    // Check Kustomize
+    _, err = node.LookPath("kustomize")
+    if err != nil {
+        utils.WarnPrintf("Kustomize not found! Kustomize will be automatically installed!\n")
+    } else {
+        utils.SuccessPrintf("Kustomize found!\n")
+        node.Configs.Yurt.KustomizeInstalled = true
+    }
+
+    // Add OS-specific dependencies to installation lists
+    switch node.Configs.System.CurrentOS {
+    case "ubuntu":
+        node.Configs.Yurt.Dependencies = "curl apt-transport-https ca-certificates build-essential git"
+    case "rocky linux":
+        node.Configs.Yurt.Dependencies = ""
+    case "centos":
+        node.Configs.Yurt.Dependencies = ""
+    default:
+        utils.FatalPrintf("Unsupported OS: %s\n", node.Configs.System.CurrentOS)
+    }
+
+    utils.SuccessPrintf("Finished checking system environment!\n")
+}
+
+// Initialize OpenYurt on master node
+func (node *Node) YurtMasterInit() {
+    node.OnlyExecByMaster()
+    // Initialize
+    var err error
+    node.CheckYurtMasterEnvironment()
+    node.CreateTmpDir()
+    // defer node.CleanUpTmpDir()
+
+    // Install dependencies
+    utils.WaitPrintf("Installing dependencies")
+    err = node.InstallPackages(node.Configs.Yurt.Dependencies)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to install dependencies!\n")
+
+    // Treat master as cloud node
+    if node.Configs.Yurt.MasterAsCloud {
+        utils.WarnPrintf("Master node WILL also be treated as a cloud node!\n")
+        node.ExecShellCmd("kubectl taint nodes --all node-role.kubernetes.io/master:NoSchedule-")
+        node.ExecShellCmd("kubectl taint nodes --all node-role.kubernetes.io/control-plane-")
+    }
+
+    // Install helm
+    if !node.Configs.Yurt.HelmInstalled {
+        switch node.Configs.System.CurrentOS {
+        case "ubuntu":
+            // Download public signing key && Add the Helm apt repository
+            utils.WaitPrintf("Downloading public signing key && Add the Helm apt repository")
+            // Download public signing key
+            filePathName, err := node.DownloadToTmpDir(node.Configs.Yurt.HelmPublicSigningKeyDownloadUrl)
+            utils.CheckErrorWithMsg(err, "Failed to download public signing key && add the Helm apt repository!\n")
+            _, err = node.ExecShellCmd("sudo mkdir -p /usr/share/keyrings && cat %s | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null", filePathName)
+            utils.CheckErrorWithMsg(err, "Failed to download public signing key && add the Helm apt repository!\n")
+            // Add the Helm apt repository
+            _, err = node.ExecShellCmd(`echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list`)
+            utils.CheckErrorWithTagAndMsg(err, "Failed to download public signing key && add the Helm apt repository!\n")
+            // Install helm
+            utils.WaitPrintf("Installing Helm")
+            err = node.InstallPackages("helm")
+            utils.CheckErrorWithTagAndMsg(err, "Failed to install helm!\n")
+        default:
+            utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS)
+        }
+    }
+
+    // Install kustomize
+    if !node.Configs.Yurt.KustomizeInstalled {
+        // Download kustomize helper script
+        utils.WaitPrintf("Downloading kustomize")
+        filePathName, err := node.DownloadToTmpDir(node.Configs.Yurt.KustomizeScriptDownloadUrl)
+        utils.CheckErrorWithMsg(err, "Failed to download kustomize!\n")
+        // Download kustomize
+        _, err = node.ExecShellCmd("chmod u+x %s && %s %s", filePathName, filePathName, node.Configs.System.TmpDir)
+        utils.CheckErrorWithTagAndMsg(err, "Failed to download kustomize!\n")
+        // Install kustomize
+        utils.WaitPrintf("Installing kustomize")
+        _, err = node.ExecShellCmd("sudo cp %s /usr/local/bin", node.Configs.System.TmpDir+"/kustomize")
+        utils.CheckErrorWithTagAndMsg(err, "Failed to install kustomize!\n")
+    }
+
+    // Add OpenYurt repo with helm
+    utils.WaitPrintf("Adding OpenYurt repo (version %s) with helm", node.Configs.Yurt.YurtVersion)
+    _, err = node.ExecShellCmd("git clone --quiet https://github.com/openyurtio/openyurt-helm.git %s/openyurt-helm && pushd %s/openyurt-helm && git checkout openyurt-%s && popd", node.Configs.System.TmpDir, node.Configs.System.TmpDir, node.Configs.Yurt.YurtVersion)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to add OpenYurt repo with helm!\n")
+
+    // Deploy yurt-app-manager
+    utils.WaitPrintf("Deploying yurt-app-manager")
+    _, err = node.ExecShellCmd("helm install yurt-app-manager -n kube-system %s/openyurt-helm/charts/yurt-app-manager", node.Configs.System.TmpDir)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to deploy yurt-app-manager!\n")
+
+    // Wait for yurt-app-manager to be ready
+    utils.WaitPrintf("Waiting for yurt-app-manager to be ready")
+    waitCount := 1
+    for {
+        yurtAppManagerStatus, err := node.ExecShellCmd(`kubectl get pod -n kube-system | grep yurt-app-manager | sed -n "s/\s*\(\S*\)\s*\(\S*\)\s*\(\S*\).*/\2 \3/p"`)
+        utils.CheckErrorWithMsg(err, "Failed to wait for yurt-app-manager to be ready!\n")
+        if yurtAppManagerStatus == "1/1 Running" {
+            utils.SuccessPrintf("\n")
+            break
+        } else {
+            utils.WarnPrintf("Waiting for yurt-app-manager to be ready [%ds]\n", waitCount)
+            waitCount += 1
+            time.Sleep(time.Second)
+        }
+    }
+
+    // Deploy yurt-controller-manager
+    utils.WaitPrintf("Deploying yurt-controller-manager")
+    _, err = node.ExecShellCmd("helm install openyurt %s/openyurt-helm/charts/openyurt -n kube-system", node.Configs.System.TmpDir)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to deploy yurt-controller-manager!\n")
+
+    // Setup raven-controller-manager Component
+    // Clone repository
+    utils.WaitPrintf("Cloning repo: raven-controller-manager")
+    _, err = node.ExecShellCmd("git clone --quiet https://github.com/openyurtio/raven-controller-manager.git %s/raven-controller-manager", node.Configs.System.TmpDir)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to clone repo: raven-controller-manager!\n")
+    // Deploy raven-controller-manager
+    utils.WaitPrintf("Deploying raven-controller-manager")
+    _, err = node.ExecShellCmd("pushd %s/raven-controller-manager && git checkout v0.3.0 && make generate-deploy-yaml && kubectl apply -f _output/yamls/raven-controller-manager.yaml && popd", node.Configs.System.TmpDir)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to deploy raven-controller-manager!\n")
+
+    // Setup raven-agent Component
+    // Clone repository
+    utils.WaitPrintf("Cloning repo: raven-agent")
+    _, err = node.ExecShellCmd("git clone --quiet https://github.com/openyurtio/raven.git %s/raven-agent", node.Configs.System.TmpDir)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to clone repo: raven-agent!\n")
+    // Deploy raven-agent
+    utils.WaitPrintf("Deploying raven-agent")
+    _, err = node.ExecShellCmd("pushd %s/raven-agent && git checkout v0.3.0 && FORWARD_NODE_IP=true make deploy && popd", node.Configs.System.TmpDir)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to deploy raven-agent!\n")
+}
+
+// Expand OpenYurt to worker node
+func (node *Node) YurtMasterExpand(worker *Node) {
+    node.OnlyExecByMaster()
+    // Initialize
+    var err error
+    var workerAsEdge string
+
+    // Label worker node as cloud/edge
+    utils.WaitPrintf("Labeling worker node: %s", worker.Configs.System.NodeHostName)
+    if worker.NodeRole == "edge" {
+        workerAsEdge = "true"
+    } else if worker.NodeRole == "cloud" {
+        workerAsEdge = "false"
+    } else {
+        utils.FatalPrintf("worker's role must be edge or cloud, but this node's role is %s\n", worker.NodeRole)
+    }
+    _, err = node.ExecShellCmd("kubectl label node %s openyurt.io/is-edge-worker=%s --overwrite", worker.Configs.System.NodeHostName, workerAsEdge)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to label worker node!\n")
+
+    // Activate the node autonomous mode
+    utils.WaitPrintf("Activating the node autonomous mode")
+    _, err = node.ExecShellCmd("kubectl annotate node %s node.beta.openyurt.io/autonomy=true --overwrite", worker.Configs.System.NodeHostName)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to activate the node autonomous mode!\n")
+
+    // Wait for worker node to be Ready
+    utils.WaitPrintf("Waiting for worker node to be ready")
+    waitCount := 1
+    for {
+        workerNodeStatus, err := node.ExecShellCmd(`kubectl get nodes | sed -n "/.*%s.*/p" | sed -n "s/\s*\(\S*\)\s*\(\S*\).*/\2/p"`, worker.Configs.System.NodeHostName)
+        utils.CheckErrorWithMsg(err, "Failed to wait for worker node to be ready!\n")
+        if workerNodeStatus == "Ready" {
+            utils.SuccessPrintf("\n")
+            break
+        } else {
+            utils.WarnPrintf("Waiting for worker node to be ready [%ds]\n", waitCount)
+            waitCount += 1
+            time.Sleep(time.Second)
+        }
+    }
+
+    // Restart pods in the worker node
+    utils.WaitPrintf("Restarting pods in the worker node")
+    shellOutput, err := node.ExecShellCmd(template.GetRestartPodsShell(), worker.Configs.System.NodeHostName)
+    utils.CheckErrorWithMsg(err, "Failed to restart pods in the worker node!\n")
+    podsToBeRestarted := strings.Split(shellOutput, "\n")
+    for _, pods := range podsToBeRestarted {
+        podsInfo := strings.Split(pods, " ")
+        utils.WaitPrintf("Restarting pod: %s => %s", podsInfo[0], podsInfo[1])
+        _, err = node.ExecShellCmd("kubectl -n %s delete pod %s", podsInfo[0], podsInfo[1])
+        utils.CheckErrorWithTagAndMsg(err, "Failed to restart pods in the worker node!\n")
+    }
+}
+
+// Join existing Kubernetes worker node to OpenYurt cluster
+func (node *Node) YurtWorkerJoin(addr string, port string, token string) {
+
+    // Initialize
+    var err error
+
+    // Set up Yurthub
+    utils.WaitPrintf("Setting up Yurthub")
+    _, err = node.ExecShellCmd(
+        "echo '%s' | sed -e 's|__kubernetes_master_address__|%s:%s|' -e 's|__bootstrap_token__|%s|' | sudo tee /etc/kubernetes/manifests/yurthub-ack.yaml",
+        template.GetYurtHubConfig(),
+        addr,
+        port,
+        token)
+    utils.CheckErrorWithTagAndMsg(err, "Failed to set up Yurthub!\n")
+
+    // Configure Kubelet
+    utils.WaitPrintf("Configuring kubelet")
+    _, err = node.ExecShellCmd("sudo mkdir -p /var/lib/openyurt && echo '%s' | sudo tee /var/lib/openyurt/kubelet.conf", template.GetKubeletConfig())
+    utils.CheckErrorWithMsg(err, "Failed to configure kubelet!\n")
+    _, err = node.ExecShellCmd(`sudo sed -i "s|KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=\/etc\/kubernetes\/bootstrap-kubelet.conf\ --kubeconfig=\/etc\/kubernetes\/kubelet.conf|KUBELET_KUBECONFIG_ARGS=--kubeconfig=\/var\/lib\/openyurt\/kubelet.conf|g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf`)
+    utils.CheckErrorWithMsg(err, "Failed to configure kubelet!\n")
+    _, err = node.ExecShellCmd("sudo systemctl daemon-reload && sudo systemctl restart kubelet")
+    utils.CheckErrorWithTagAndMsg(err, "Failed to configure kubelet!\n")
+}
+
+func (node *Node) YurtWorkerClean() {
+    node.OnlyExecByWorker()
+    var err error
+    utils.WaitPrintf("Cleaning openyurt kubelet on node: %s", node.Name)
+    _, err = node.ExecShellCmd("sudo rm -rf /var/lib/openyurt")
+    _, err = node.ExecShellCmd("sudo rm /etc/kubernetes/pki/ca.crt")
+    _, err = node.ExecShellCmd(`sudo sed -i "s|KUBELET_KUBECONFIG_ARGS=--kubeconfig=\/var\/lib\/openyurt\/kubelet.conf|KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=\/etc\/kubernetes\/bootstrap-kubelet.conf\ --kubeconfig=\/etc\/kubernetes\/kubelet.conf|g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf`)
+    utils.CheckErrorWithMsg(err, "Failed to clean kubelet on node: %s", node.Name)
+}
diff --git a/scripts/openyurt-deployer/openyurt_deployer b/scripts/openyurt-deployer/openyurt_deployer
index 6d05d8931..930cd30e2 100755
Binary files a/scripts/openyurt-deployer/openyurt_deployer and b/scripts/openyurt-deployer/openyurt_deployer differ
diff --git a/scripts/openyurt-deployer/template/yurtAppSetTemplate.go b/scripts/openyurt-deployer/template/yurtAppSetTemplate.go
new file mode 100644
index 000000000..e38aece86
--- /dev/null
+++ b/scripts/openyurt-deployer/template/yurtAppSetTemplate.go
@@ -0,0 +1,67 @@
+package template
+
+const (
+    YurtAppSetTemplate = `echo 'apiVersion: apps.openyurt.io/v1alpha1
+kind: YurtAppSet
+metadata:
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: openyurt-aes
+spec:
+  selector:
+    matchLabels:
+      app: openyurt-aes
+  workloadTemplate:
+    deploymentTemplate:
+      metadata:
+        labels:
+          app: openyurt-aes
+      spec:
+        template:
+          metadata:
+            labels:
+              app: openyurt-aes
+          spec:
+            containers:
+            - name: relay
+              image: docker.io/vhiveease/relay:latest
+              ports:
+              - name: h2c
+                containerPort: 50000
+              args:
+              - --addr=0.0.0.0:50000
+              - --function-endpoint-url=0.0.0.0
+              - --function-endpoint-port=50051
+              - --function-name=aes-python
+            - name: aes-python
+              image: docker.io/vhiveease/aes-python:latest
+              args:
+              - --addr=0.0.0.0
+              - --port=50051
+  topology:
+    pools:
+    - name: %s
+      nodeSelectorTerm:
+        matchExpressions:
+        - key: apps.openyurt.io/nodepool
+          operator: In
+          values:
+          - %s
+      replicas: 1
+    - name: %s
+      nodeSelectorTerm:
+        matchExpressions:
+        - key: apps.openyurt.io/nodepool
+          operator: In
+          values:
+          - %s
+      replicas: 1
+      tolerations:
+      - effect: NoSchedule
+        key: apps.openyurt.io/example
+        operator: Exists' >> %s`
+)
+
+func GetYurtAppSetTemplate() string {
+    return YurtAppSetTemplate
+}
diff --git a/scripts/openyurt-deployer/template/yurtTemplate.go b/scripts/openyurt-deployer/template/yurtTemplate.go
new file mode 100644
index 000000000..a7e753409
--- /dev/null
+++ b/scripts/openyurt-deployer/template/yurtTemplate.go
@@ -0,0 +1,71 @@
+package template
+
+const (
+    yurthubTemplate = `apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    k8s-app: yurt-hub
+  name: yurt-hub
+  namespace: kube-system
+spec:
+  volumes:
+  - name: hub-dir
+    hostPath:
+      path: /var/lib/yurthub
+      type: DirectoryOrCreate
+  - name: kubernetes
+    hostPath:
+      path: /etc/kubernetes
+      type: Directory
+  - name: pem-dir
+    hostPath:
+      path: /var/lib/kubelet/pki
+      type: Directory
+  containers:
+  - name: yurt-hub
+    image: openyurt/yurthub:latest
+    imagePullPolicy: IfNotPresent
+    volumeMounts:
+    - name: hub-dir
+      mountPath: /var/lib/yurthub
+    - name: kubernetes
+      mountPath: /etc/kubernetes
+    - name: pem-dir
+      mountPath: /var/lib/kubelet/pki
+    command:
+    - yurthub
+    - --v=2
+    - --server-addr=https://__kubernetes_master_address__
+    - --node-name=$(NODE_NAME)
+    - --join-token=__bootstrap_token__
+    livenessProbe:
+      httpGet:
+        host: 127.0.0.1
+        path: /v1/healthz
+        port: 10267
+      initialDelaySeconds: 300
+      periodSeconds: 5
+      failureThreshold: 3
+    resources:
+      requests:
+        cpu: 150m
+        memory: 150Mi
+      limits:
+        memory: 300Mi
+    securityContext:
+      capabilities:
+        add: ["NET_ADMIN", "NET_RAW"]
+    env:
+    - name: NODE_NAME
+      valueFrom:
+        fieldRef:
+          fieldPath: spec.nodeName
+  hostNetwork: true
+  priorityClassName: system-node-critical
+  priority: 2000001000`
+)
+
+func GetYurtHubConfig() string {
+    return yurthubTemplate
+}