diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index dda6d6a87..d420155fd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -7,7 +7,7 @@ on: - 'docs/**' - '**.md' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/build_setup.yml b/.github/workflows/build_setup.yml index 415b9099d..8d78fb7ca 100644 --- a/.github/workflows/build_setup.yml +++ b/.github/workflows/build_setup.yml @@ -7,7 +7,7 @@ on: - 'docs/**' - '**.md' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9ab770af0..9e27f2945 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -16,7 +16,7 @@ on: branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] pull_request: # The branches below must be a subset of the branches above - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] workflow_dispatch: schedule: - cron: '40 13 * * 2' diff --git a/.github/workflows/firecracker_cri_tests.yml b/.github/workflows/firecracker_cri_tests.yml index 303ba5e4f..5286c4d1b 100644 --- a/.github/workflows/firecracker_cri_tests.yml +++ b/.github/workflows/firecracker_cri_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/gvisor_cri_tests.yml b/.github/workflows/gvisor_cri_tests.yml index 
237368072..33d1a77a0 100644 --- a/.github/workflows/gvisor_cri_tests.yml +++ b/.github/workflows/gvisor_cri_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index c910655b3..8b259e078 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml index a4c44d947..2383bb0b3 100644 --- a/.github/workflows/linters.yml +++ b/.github/workflows/linters.yml @@ -3,7 +3,7 @@ on: push: branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] jobs: build: diff --git a/.github/workflows/stargz_tests.yml b/.github/workflows/stargz_tests.yml index e2ea66753..5cddc2060 100644 --- a/.github/workflows/stargz_tests.yml +++ b/.github/workflows/stargz_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 99c9f027e..2b7c4bde6 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, 
legacy-firecracker-v0.24.0-with-upf-support ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] paths-ignore: - 'docs/**' - '**.md' diff --git a/scripts/openyurt-deployer/README.md b/scripts/openyurt-deployer/README.md new file mode 100644 index 000000000..10b79f0c8 --- /dev/null +++ b/scripts/openyurt-deployer/README.md @@ -0,0 +1,120 @@ +# Quick set-up `OpenYurt` + +## 1. Introduction + +This program extends [`EasyOpenyurt`](https://github.com/flyinghorse0510/easy_openyurt) to automate the setup process of an `OpenYurt` cluster. + +It supports setting up a Kubernetes cluster using kubeadm and then deploying `OpenYurt` and Knative on it. It is compatible with vHive stock-only mode. + +## 2. Brief overview + +**Prerequisite of nodes:** +1. The scripts have been tested on [cloud-lab](https://www.cloudlab.us/); the suggested profile is [`openyurt-demo`](https://www.cloudlab.us/p/ntu-cloud/openyurt-demo), with one master node, one cloud worker node and one edge worker node +2. Ensure that SSH authentication is possible from the local device to all nodes. + + +**Components:** + +| Files | Purpose | +| :----------: | :---: | +| main.go | script entry point | +| `conf.json` | json file that stores the cluster's configuration | +| node | executing commands on remote nodes through ssh | +| configs | node runtime configurations | + +**Description** + +1. Prepare the system environment for all nodes, installing kubeadm, kubectl, dependencies, etc. +2. On the master node, init the cluster using `kubeadm init`; on each worker node, join the initialized cluster. +3. On top of the created cluster, init the `openyurt` cluster on both master and worker nodes, then expand to all worker nodes from the master node. +4. (Optional) Deploy Knative (vHive stock-only mode compatible) + +## 3. 
Usage +```bash +./openyurt_deployer deploy # deploy openyurt on the cluster +``` +```bash +./openyurt_deployer clean # clean the openyurt cluster and restore it to initial state +``` + +### 3.1 Preparations +1. Prepare a cluster with at least two nodes. +2. Change the contents in `conf.json` to following format: +```plaintext +{ + "master": "user@master", + "workers": { + "cloud": [ + "user@cloud-0" + ], + "edge": [ + "user@edge-0" + ] + } +} +``` + +### 3.2 Run Script + +```bash +go build . +./openyurt_deployer deploy +``` +If it gives out error like: +``` +FATA[0001] Failed to connect to: username@host +``` +Please execute: +``` +eval `ssh-agent -s` && ssh-add ~/.ssh/ +``` +For example: +``` +eval `ssh-agent -s` && ssh-add ~/.ssh/id_rsa +``` +And try again + + +## 4. Demo: Create `NodePool` And Deploy service on it +**Referenced from [`OpenYurt`](https://openyurt.io/docs/user-manuals/workload/node-pool-management)* + +The demo would deploy a helloworld function to cloud node pool or edge node pool + +Deploy the demo: +``` +./openyurt_deployer demo-c +``` +or: +``` +./openyurt_deployer demo-e +``` +where `demo-c` would deploy the service to the cloud node pool and `demo-e` would deploy the service to the edge node pool. + +The demo code will also show information about node pool after deployment. +The name for `demo-c` would be `helloworld-cloud`, while the name for `demo-e` would be `helloworld-edge` +It will also show the services' `URL` so you can try to invoke it on the master node. + +You can check the node pool information simply by: +``` +./openyurt_deployer demo-print +``` +Or delete the services deployed on node pool by: +``` +./openyurt_deployer demo-clear +``` + +The demo code will also show information about node pool after deployment. + +### 4.1 Invoke the Services (Optional) +You can try to invoke the services created by `demo-c` or `demo-e` on master node. +First, ssh to master node, following commands should all be executed on master node. 
+``` +ssh <username>@<master-node-ip> +git clone https://github.com/vhive-serverless/vSwarm.git +cd vSwarm/tools/test-client && go build . +./test-client --addr $URL:80 --name "Hello there" +``` + +Here `$URL` should be the `URL` returned in the previous part when deploying the cloud and edge services; you can also get it from `kubectl get ksvc`, discarding the `http://` at the beginning. + +After invoking, you can use `kubectl get pods -o wide` to check whether the pods have been auto-scaled. diff --git a/scripts/openyurt-deployer/conf.json b/scripts/openyurt-deployer/conf.json new file mode 100644 index 000000000..ff23bc25f --- /dev/null +++ b/scripts/openyurt-deployer/conf.json @@ -0,0 +1,11 @@ +{ + "master": "username@masterip", + "workers": { + "cloud": [ + "username@cloudip" + ], + "edge": [ + "username@edgeip" + ] + } +} \ No newline at end of file diff --git a/scripts/openyurt-deployer/configs/configs.go b/scripts/openyurt-deployer/configs/configs.go new file mode 100644 index 000000000..64bd01fd6 --- /dev/null +++ b/scripts/openyurt-deployer/configs/configs.go @@ -0,0 +1,91 @@ +package configs + +import ( + "encoding/json" + "io" + "os" + "path" + + utils "github.com/vhive-serverless/vHive/scripts/utils" +) + +// Decode specific config files (JSON format) +func DecodeConfig(configFilePath string, configStruct interface{}) error { + // Open & read the config file + configFile, err := os.Open(configFilePath) + if err != nil { + return err + } + defer configFile.Close() + + // Read file content + configContent, err := io.ReadAll(configFile) + if err != nil { + return err + } + + // Decode json into struct + err = json.Unmarshal(configContent, configStruct) + + return err + +} + +// Load knative config files +func (knative *KnativeConfigStruct) LoadConfig() error { + var err error + // Check config directory + if len(VHive.VHiveSetupConfigPath) == 0 { + VHive.VHiveSetupConfigPath, err = utils.GetVHiveFilePath("configs/setup") + if err != nil { + utils.CleanEnvironment() + os.Exit(1) + 
} + } + // Get the (absolute) path of the config file + configFilePath := path.Join(VHive.VHiveSetupConfigPath, "knative.json") + + // Decode json into struct + err = DecodeConfig(configFilePath, knative) + + return err + +} + +// Load kubernetes config files +func (kube *KubeConfigStruct) LoadConfig() error { + // Get the (absolute) path of the config file + configFilePath := path.Join(VHive.VHiveSetupConfigPath, "kube.json") + + // Decode json into struct + err := DecodeConfig(configFilePath, kube) + + return err +} + +// Load system config files +func (system *SystemEnvironmentStruct) LoadConfig() error { + // Get the (absolute) path of the config file + configFilePath := path.Join(VHive.VHiveSetupConfigPath, "system.json") + + // Decode json into struct + err := DecodeConfig(configFilePath, system) + + return err +} + +// Load vHive config files +func (vhive *VHiveConfigStruct) LoadConfig() error { + // Get the (absolute) path of the config file + configFilePath := path.Join(VHive.VHiveSetupConfigPath, "vhive.json") + + // Decode json into struct + err := DecodeConfig(configFilePath, vhive) + + return err + +} + +const ( + Version = "0.2.4b" // Version Info +) diff --git a/scripts/openyurt-deployer/configs/demo.go b/scripts/openyurt-deployer/configs/demo.go new file mode 100644 index 000000000..bb43769e9 --- /dev/null +++ b/scripts/openyurt-deployer/configs/demo.go @@ -0,0 +1,21 @@ +package configs + +type DemoEnvironment struct { + CloudYamlFile string + EdgeYamlFile string + CloudBenchYamlFile string + EdgeBenchYamlFile string + YurtAppSetYamlFile string + CloudPoolName string + EdgePoolName string +} + +var Demo = DemoEnvironment{ + CloudYamlFile: "cloud.yaml", + EdgeYamlFile: "edge.yaml", + CloudBenchYamlFile: "cloud-bench.yaml", + EdgeBenchYamlFile: "edge-bench.yaml", + YurtAppSetYamlFile: "yurt.yaml", + CloudPoolName: "cloud", + EdgePoolName: "edge", +} diff --git a/scripts/openyurt-deployer/configs/go.mod b/scripts/openyurt-deployer/configs/go.mod new 
file mode 100644 index 000000000..e19ce9147 --- /dev/null +++ b/scripts/openyurt-deployer/configs/go.mod @@ -0,0 +1,3 @@ +module github.com/vhive-serverless/vhive/scripts/openyurt_deployer/configs + +go 1.20 \ No newline at end of file diff --git a/scripts/openyurt-deployer/configs/knative.go b/scripts/openyurt-deployer/configs/knative.go new file mode 100644 index 000000000..557cb0685 --- /dev/null +++ b/scripts/openyurt-deployer/configs/knative.go @@ -0,0 +1,35 @@ +package configs + +import "fmt" + +type KnativeConfigStruct struct { + KnativeVersion string + IstioVersion string + IstioDownloadUrlTemplate string + IstioOperatorConfigUrl string + MetalLBVersion string + MetalLBConfigURLArray []string + LocalRegistryRepoVolumeSize string + LocalRegistryVolumeConfigUrl string + LocalRegistryDockerRegistryConfigUrl string + LocalRegistryHostUpdateConfigUrl string + MagicDNSConfigUrl string + VHiveMode bool +} + +var Knative = KnativeConfigStruct{ + IstioOperatorConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/istio/istio-minimal-operator.yaml", + IstioDownloadUrlTemplate: "https://github.com/istio/istio/releases/download/%s/istio-%s-linux-%s.tar.gz", + MetalLBConfigURLArray: []string{ + "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/metallb/metallb-ipaddresspool.yaml", + "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/metallb/metallb-l2advertisement.yaml"}, + LocalRegistryVolumeConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/repository-volume.yaml", + LocalRegistryDockerRegistryConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/docker-registry.yaml", + LocalRegistryHostUpdateConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/repository-update-hosts.yaml", //TODO: uses path + MagicDNSConfigUrl: 
"https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/knative_yamls/serving-default-domain.yaml", //TODO: uses path + VHiveMode: true, +} + +func (knative *KnativeConfigStruct) GetIstioDownloadUrl() string { + return fmt.Sprintf(knative.IstioDownloadUrlTemplate, knative.IstioVersion, knative.IstioVersion, System.CurrentArch) +} diff --git a/scripts/openyurt-deployer/configs/kube.go b/scripts/openyurt-deployer/configs/kube.go new file mode 100644 index 000000000..b88f21c46 --- /dev/null +++ b/scripts/openyurt-deployer/configs/kube.go @@ -0,0 +1,14 @@ +package configs + +type KubeConfigStruct struct { + K8sVersion string + AlternativeImageRepo string + ApiserverAdvertiseAddress string + PodNetworkCidr string + PodNetworkAddonConfigURL string + ApiserverPort string + ApiserverToken string + ApiserverTokenHash string +} + +var Kube = KubeConfigStruct{} diff --git a/scripts/openyurt-deployer/configs/system.go b/scripts/openyurt-deployer/configs/system.go new file mode 100644 index 000000000..b2d1a4de7 --- /dev/null +++ b/scripts/openyurt-deployer/configs/system.go @@ -0,0 +1,74 @@ +package configs + +import ( + "fmt" + "runtime" +) + +// System environment struct +type SystemEnvironmentStruct struct { + GoInstalled bool + ContainerdInstalled bool + RuncInstalled bool + CniPluginsInstalled bool + SystemdStartUp bool + NodeHostName string + GoVersion string + GoDownloadUrlTemplate string + ContainerdVersion string + ContainerdDownloadUrlTemplate string + ContainerdSystemdProfileDownloadUrl string + RuncVersion string + RuncDownloadUrlTemplate string + RunscVersion string + RunscDownloadUrlTemplate string + CniPluginsVersion string + CniPluginsDownloadUrlTemplate string + KubeVersion string + Dependencies string + TmpDir string + CurrentOS string + CurrentArch string + CurrentDir string + UserHomeDir string + PmuToolsRepoUrl string + ProtocVersion string + ProtocDownloadUrlTemplate string +} + +// Current system environment +var System = 
SystemEnvironmentStruct{ + GoInstalled: false, + ContainerdInstalled: false, + RuncInstalled: false, + CniPluginsInstalled: false, + SystemdStartUp: true, + CurrentOS: runtime.GOOS, + CurrentArch: runtime.GOARCH, + CurrentDir: "", + UserHomeDir: "", + NodeHostName: "", +} + +func (system *SystemEnvironmentStruct) GetProtocDownloadUrl() string { + return fmt.Sprintf(system.ProtocDownloadUrlTemplate, system.ProtocVersion, system.ProtocVersion) +} + +func (system *SystemEnvironmentStruct) GetContainerdDownloadUrl() string { + return fmt.Sprintf(system.ContainerdDownloadUrlTemplate, system.ContainerdVersion, system.ContainerdVersion, system.CurrentArch) +} + +func (system *SystemEnvironmentStruct) GetRuncDownloadUrl() string { + return fmt.Sprintf(system.RuncDownloadUrlTemplate, system.RuncVersion, system.CurrentArch) +} + +func (system *SystemEnvironmentStruct) GetRunscDownloadUrl() string { + unameArch := system.CurrentArch + switch unameArch { + case "amd64": + unameArch = "x86_64" + default: + } + + return fmt.Sprintf(system.RunscDownloadUrlTemplate, system.RunscVersion, unameArch) +} diff --git a/scripts/openyurt-deployer/configs/vhive.go b/scripts/openyurt-deployer/configs/vhive.go new file mode 100644 index 000000000..e3ec12ebd --- /dev/null +++ b/scripts/openyurt-deployer/configs/vhive.go @@ -0,0 +1,19 @@ +package configs + +type VHiveConfigStruct struct { + FirecrackerKernelImgDownloadUrl string + StargzVersion string + VHiveRepoPath string + VHiveRepoBranch string + VHiveRepoUrl string + VHiveSetupConfigPath string + ForceRemote bool +} + +var VHive = VHiveConfigStruct{ + VHiveRepoPath: ".", + VHiveRepoBranch: "main", + VHiveRepoUrl: "https://github.com/vhive-serverless/vHive.git", + VHiveSetupConfigPath: "../../configs/setup", + ForceRemote: false, +} diff --git a/scripts/openyurt-deployer/configs/yurt.go b/scripts/openyurt-deployer/configs/yurt.go new file mode 100644 index 000000000..10cc29f2a --- /dev/null +++ b/scripts/openyurt-deployer/configs/yurt.go 
@@ -0,0 +1,25 @@ +package configs + +type YurtEnvironment struct { + HelmInstalled bool + HelmPublicSigningKeyDownloadUrl string + KustomizeInstalled bool + KustomizeScriptDownloadUrl string + MasterAsCloud bool + WorkerNodeName string + WorkerAsEdge bool + Dependencies string + YurtVersion string +} + +var Yurt = YurtEnvironment{ + HelmInstalled: false, + HelmPublicSigningKeyDownloadUrl: "https://baltocdn.com/helm/signing.asc", + KustomizeInstalled: false, + KustomizeScriptDownloadUrl: "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh", + MasterAsCloud: true, + WorkerNodeName: "", + WorkerAsEdge: true, + Dependencies: "", + YurtVersion: "1.2.1", +} diff --git a/scripts/openyurt-deployer/go.mod b/scripts/openyurt-deployer/go.mod new file mode 100644 index 000000000..88687a0ae --- /dev/null +++ b/scripts/openyurt-deployer/go.mod @@ -0,0 +1,15 @@ +module github.com/vhive-serverless/vhive/scripts/openyurt_deployer + +go 1.20 + +require ( + github.com/davidmz/go-pageant v1.0.2 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/pkg/sftp v1.13.4 // indirect + github.com/sfreiberg/simplessh v0.0.0-20220719182921-185eafd40485 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231018022901-6a0c478d2c9f // indirect + github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231018022901-6a0c478d2c9f // indirect + golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect +) diff --git a/scripts/openyurt-deployer/go.sum b/scripts/openyurt-deployer/go.sum new file mode 100644 index 000000000..9f7752abe --- /dev/null +++ b/scripts/openyurt-deployer/go.sum @@ -0,0 +1,35 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidmz/go-pageant 
v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0= +github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/pkg/sftp v1.13.4 h1:Lb0RYJCmgUcBgZosfoi9Y9sbl6+LJgOIgk/2Y4YjMFg= +github.com/pkg/sftp v1.13.4/go.mod h1:LzqnAvaD5TWeNBsZpfKxSYn1MbjWwOsCIAFFJbpIsK8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sfreiberg/simplessh v0.0.0-20220719182921-185eafd40485 h1:ZMBZ2DKX1sScUSo9ZUwGI7jCMukslPNQNfZaw9vVyfY= +github.com/sfreiberg/simplessh v0.0.0-20220719182921-185eafd40485/go.mod h1:9qeq2P58+4+LyuncL3waJDG+giOfXgowfrRZZF9XdWk= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231018022901-6a0c478d2c9f h1:2Cyd5RJaZ0Pdyk7Az338/PKeofj7RLZjD5rSAO+wqvk= +github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231018022901-6a0c478d2c9f/go.mod h1:nJSon4Eng7PdZ4HJX9dnZ7H4qxVm/r5zseFPfom7Jto= +github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231018022901-6a0c478d2c9f h1:3KArl/h4PpkhAhlpPAMarh9hrj6AWYfnhkoXZCS2AWw= +github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231018022901-6a0c478d2c9f/go.mod h1:xyjKlPn6JqSQtzKOCu8L4DW4rQcNmhxK9f97cOPo0Sg= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 
h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/scripts/openyurt-deployer/go.work b/scripts/openyurt-deployer/go.work new file mode 100644 index 000000000..8c0d69e84 --- /dev/null +++ b/scripts/openyurt-deployer/go.work @@ -0,0 +1,8 @@ +go 1.19 + +use( +. 
+./configs +./node +./template +) \ No newline at end of file diff --git a/scripts/openyurt-deployer/main.go b/scripts/openyurt-deployer/main.go new file mode 100644 index 000000000..86e414443 --- /dev/null +++ b/scripts/openyurt-deployer/main.go @@ -0,0 +1,187 @@ +package main + +import ( + "encoding/json" + "flag" + "os" + + "github.com/vhive-serverless/vHive/scripts/utils" + + log "github.com/sirupsen/logrus" + "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/configs" + "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/node" +) + +type NodesInfo struct { + Master string `json:"master"` + Workers Workers `json:"workers"` +} + +type Workers struct { + Cloud []string `json:"cloud"` + Edge []string `json:"edge"` +} + +func main() { + if len(os.Args) != 2 { + utils.InfoPrintf("Usage: %s [Parameters...]\n", os.Args[0]) + os.Exit(-1) + } + + var ( + deployerConf = flag.String("conf", "conf.json", + `Configuration file with the following structure: + { + "master": "user@master", + "workers": { + "cloud": [ + "user@cloud-0" + ], + "edge": [ + "user@edge-0" + ] + } + } + `) + logLvl = flag.String("loglvl", "debug", "Debug level: 'info' or 'debug'") + ) + flag.Parse() + log.SetOutput(os.Stdout) + switch *logLvl { + case "info": + log.SetLevel(log.InfoLevel) + case "debug": + log.SetLevel(log.DebugLevel) + log.Debug("Debug mode is enabled") + default: + log.Fatalf("Invalid log level: '%s', expected 'info' or 'debug'", *logLvl) + } + operation := os.Args[1] + switch operation { + case "deploy": + deployNodes(*deployerConf) + // case "clean": + // cleanNodes(*deployerConf) + // case "demo-e": + // demo(*deployerConf, false) + // case "demo-c": + // demo(*deployerConf, true) + // case "demo-clear": + // delDemo(*deployerConf) + // case "demo-print": + // printDemo(*deployerConf) + // case "deploy-yurt": + // deployOpenYurt(*deployerConf) + default: + utils.InfoPrintf("Usage: %s [Parameters...]\n", os.Args[0]) + os.Exit(-1) + } + +} + +func 
readAndUnMarshall(deployerConfFile string) (NodesInfo, error) { + log.Debugf("Opening openyurt deployer configuration JSON: %s", deployerConfFile) + deployerConfJSON, err := os.ReadFile(deployerConfFile) + if err != nil { + log.Fatalf("Failed to open configuration file :%s", err) + } + + log.Debugf("Unmarshaling nodes JSON: %s", deployerConfJSON) + var nodesInfo NodesInfo + err = json.Unmarshal(deployerConfJSON, &nodesInfo) + if err != nil { + log.Fatalf("Failed to unmarshal nodes JSON: %s", err) + } + return nodesInfo, nil +} + +func parseNodeInfo(nodesInfo NodesInfo) []node.Node { + masterName := nodesInfo.Master + cloudNames := nodesInfo.Workers.Cloud + edgeNames := nodesInfo.Workers.Edge + nodeList := []node.Node{} + + // Load configs from configs/setup.json + configs.System.LoadConfig() + configs.Knative.LoadConfig() + configs.Kube.LoadConfig() + + masterNode := node.Node{Name: masterName, Client: SetupSSHConn(masterName), NodeRole: "master", Configs: &node.NodeConfig{ + System: configs.System, + Kube: configs.Kube, + Knative: configs.Knative, + Yurt: configs.Yurt, + Demo: configs.Demo}} + nodeList = append(nodeList, masterNode) + for _, name := range cloudNames { + nodeList = append(nodeList, node.Node{Name: name, Client: SetupSSHConn(name), NodeRole: "cloud", Configs: &node.NodeConfig{ + System: configs.System, + Kube: configs.Kube, + Knative: configs.Knative, + Yurt: configs.Yurt, + Demo: configs.Demo}}) + } + + for _, name := range edgeNames { + nodeList = append(nodeList, node.Node{Name: name, Client: SetupSSHConn(name), NodeRole: "edge", Configs: &node.NodeConfig{ + System: configs.System, + Kube: configs.Kube, + Knative: configs.Knative, + Yurt: configs.Yurt, + Demo: configs.Demo}}) + } + + for _, node := range nodeList { + node.ReadSystemInfo() + utils.SuccessPrintf("Read system info on node:%s success!\n", node.Name) + } + + return nodeList +} + +func initializeNodes(nodesInfo NodesInfo) []node.Node { + nodeList := parseNodeInfo(nodesInfo) + + // init 
system, all nodes are the same + for _, node := range nodeList { + node.SystemInit() + utils.SuccessPrintf("Init system environment on node: %s success!\n", node.Name) + } + return nodeList +} + +func deployNodes(deployerConfFile string) { + + nodesInfo, err := readAndUnMarshall(deployerConfFile) + utils.CheckErrorWithMsg(err, "Failed to read and unmarshal deployer configuration JSON") + nodeList := initializeNodes(nodesInfo) + masterNode := nodeList[0] + workerNodes := nodeList[1:] + + // init kube cluster + utils.InfoPrintf("Start to init kube cluster!\n") + addr, port, token, hash := masterNode.KubeMasterInit() + utils.SuccessPrintf("Master init success, join the cluster with following command:\n sudo kubeadm join %s:%s --token %s --discovery-token-ca-cert-hash %s\n", + addr, port, token, hash) + for _, worker := range workerNodes { + worker.KubeWorkerJoin(addr, port, token, hash) + utils.InfoPrintf("worker %s joined cluster!\n", worker.Name) + } + nodesName := masterNode.GetAllNodes() + utils.InfoPrintf("All nodes within the cluster: [") + for _, name := range nodesName { + utils.InfoPrintf(name) + } + utils.InfoPrintf("]\n") + + // init knative + utils.SuccessPrintf("Start to init knative\n") + masterNode.InstallKnativeServing() + masterNode.InstallKnativeEventing() + utils.SuccessPrintf("Knative has been installed!\n") + + // init demo environment + masterNode.BuildDemo(workerNodes) + + utils.SuccessPrintf(">>>>>>>>>>>>>>>>OpenYurt Cluster Deployment Finished!<<<<<<<<<<<<<<<\n") +} diff --git a/scripts/openyurt-deployer/node/demo.go b/scripts/openyurt-deployer/node/demo.go new file mode 100644 index 000000000..840ff37f9 --- /dev/null +++ b/scripts/openyurt-deployer/node/demo.go @@ -0,0 +1,127 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/vhive-serverless/vHive/scripts/utils" + "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/template" +) + +// Builds cloud and edge nodepools +func (masterNode *Node) BuildDemo(workerNodes []Node) 
{ + + masterNode.GetUserHomeDir() + masterNode.GetNodeHostName() + + var err error + // cloud.yaml + cloudPoolName := masterNode.Configs.Demo.CloudPoolName + edgePoolName := masterNode.Configs.Demo.EdgePoolName + + cloudFile := fmt.Sprintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.CloudYamlFile) + edgeFile := fmt.Sprintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.EdgeYamlFile) + // yurtFile := utils.InfoPrintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.YurtAppSetYamlFile) + + createCloudNpTemplate := template.CreateCloudNpTemplate() + utils.WaitPrintf("Creating yaml files for cloud nodepool") + _, err = masterNode.ExecShellCmd(createCloudNpTemplate, cloudPoolName, cloudFile) + utils.CheckErrorWithTagAndMsg(err, "Failed to create yaml for cloud\n") + + // edge.yaml + createEdgeNpTemplate := template.CreateEdgeNpTemplate() + utils.WaitPrintf("Creating yaml files for edge nodepool") + _, err = masterNode.ExecShellCmd(createEdgeNpTemplate, edgePoolName, edgeFile) + utils.CheckErrorWithTagAndMsg(err, "Failed to create yaml for edge\n") + + //label master as cloud TODO not just master, but all cloud nodes + utils.WaitPrintf("Labeling master") + _, err = masterNode.ExecShellCmd(`kubectl label node %s apps.openyurt.io/desired-nodepool=%s`, masterNode.Configs.System.NodeHostName, cloudPoolName) + utils.CheckErrorWithTagAndMsg(err, "Master Cloud label fail\n") + + //label edge + utils.WaitPrintf("Labeling workers") + for _, worker := range workerNodes { + worker.GetNodeHostName() + var desiredNpName string + if worker.NodeRole == "cloud" { + desiredNpName = cloudPoolName + } else { + desiredNpName = edgePoolName + } + _, err = masterNode.ExecShellCmd("kubectl label node %s apps.openyurt.io/desired-nodepool=%s", worker.Configs.System.NodeHostName, desiredNpName) + utils.CheckErrorWithTagAndMsg(err, "worker label fail\n") + } + utils.SuccessPrintf("Label success\n") + + 
utils.WaitPrintf("Apply cloud.yaml") + _, err = masterNode.ExecShellCmd("kubectl apply -f %s", cloudFile) + utils.CheckErrorWithTagAndMsg(err, "Failed to apply cloud.yaml\n") + + utils.WaitPrintf("Apply edge.yaml") + _, err = masterNode.ExecShellCmd("kubectl apply -f %s", edgeFile) + utils.CheckErrorWithTagAndMsg(err, "Failed to apply edge.yaml\n") +} + +func (masterNode *Node) Demo(isCloud bool) { + + masterNode.GetUserHomeDir() + masterNode.GetNodeHostName() + + var err error + cloudPoolName := masterNode.Configs.Demo.CloudPoolName + edgePoolName := masterNode.Configs.Demo.EdgePoolName + + utils.WaitPrintf("Creating benchmark's yaml file and apply it") + benchmarkTemplate := template.GetBenchmarkTemplate() + if isCloud { + _, err = masterNode.ExecShellCmd(benchmarkTemplate, "cloud", cloudPoolName, "cloud", masterNode.Configs.Demo.CloudBenchYamlFile) + _, err = masterNode.ExecShellCmd("kubectl apply -f %s", masterNode.Configs.Demo.CloudBenchYamlFile) + } else { + _, err = masterNode.ExecShellCmd(benchmarkTemplate, "edge", edgePoolName, "edge", masterNode.Configs.Demo.EdgeBenchYamlFile) + _, err = masterNode.ExecShellCmd("kubectl apply -f %s", masterNode.Configs.Demo.EdgeBenchYamlFile) + } + utils.CheckErrorWithTagAndMsg(err, "Failed to create benchmark's yaml file and apply it") + +} + +func (masterNode *Node) PrintDemoInfo(workerNodes []Node, isCloud bool) { + utils.InfoPrintf("NodePool Information:\n") + utils.InfoPrintf("+--------------------------------------------------------------------+\n") + npType := "cloud" + if !isCloud { + npType = "edge" + } + + poolName := masterNode.Configs.Demo.CloudPoolName + if !isCloud { + poolName = masterNode.Configs.Demo.EdgePoolName + } + + utils.InfoPrintf("+%s Nodepool %s:\n", npType, poolName) + utils.InfoPrintf("+Nodes:\n") + if isCloud { + utils.InfoPrintf("+\tnode: %s <- Master\n", masterNode.Configs.System.NodeHostName) + } + for _, worker := range workerNodes { + worker.GetNodeHostName() + if worker.NodeRole == 
npType { + utils.InfoPrintf("+\tnode: %s\n", worker.Configs.System.NodeHostName) + } + } + + shellOut, _ := masterNode.ExecShellCmd("kubectl get ksvc | grep '\\-%s' | awk '{print $1, substr($2, 8)}'", npType) + var serviceName string + var serviceURL string + splittedOut := strings.Split(shellOut, " ") + if len(splittedOut) != 2 { + serviceName = "Null" + serviceURL = "Null" + } else { + serviceName = splittedOut[0] + serviceURL = splittedOut[1] + } + utils.SuccessPrintf("+Service: Name: [%s] with URL [%s]\n", serviceName, serviceURL) + utils.InfoPrintf("+--------------------------------------------------------------------+\n") + +} diff --git a/scripts/openyurt-deployer/node/go.mod b/scripts/openyurt-deployer/node/go.mod new file mode 100644 index 000000000..0102c784d --- /dev/null +++ b/scripts/openyurt-deployer/node/go.mod @@ -0,0 +1,3 @@ +module github.com/vhive-serverless/vhive/scripts/openyurt_deployer/node + +go 1.20 diff --git a/scripts/openyurt-deployer/node/knative.go b/scripts/openyurt-deployer/node/knative.go new file mode 100644 index 000000000..9323a530d --- /dev/null +++ b/scripts/openyurt-deployer/node/knative.go @@ -0,0 +1,129 @@ +package node + +import ( + "fmt" + + "github.com/vhive-serverless/vHive/scripts/utils" +) + +// Install Knative Serving +func (node *Node) InstallKnativeServing() { + node.OnlyExecByMaster() + var err error + + node.CreateTmpDir() + defer node.CleanUpTmpDir() + + // Install and configure MetalLB + utils.WaitPrintf("Installing and configuring MetalLB") + _, err = node.ExecShellCmd(`kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e "s/strictARP: false/strictARP: true/" | kubectl apply -f - -n kube-system`) + utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB!") + _, err = node.ExecShellCmd("kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v%s/config/manifests/metallb-native.yaml", node.Configs.Knative.MetalLBVersion) + utils.CheckErrorWithMsg(err, "Failed to install and 
configure MetalLB!") + _, err = node.ExecShellCmd("kubectl -n metallb-system wait deploy controller --timeout=90s --for=condition=Available") + utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB!") + for _, value := range node.Configs.Knative.MetalLBConfigURLArray { + _, err = node.ExecShellCmd("kubectl apply -f %s", value) + utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB!") + } + utils.SuccessPrintf("\n") + + // Install istio + // Download istio + utils.WaitPrintf("Downloading istio") + istioFilePath, err := node.DownloadToTmpDir(node.GetIstioDownloadUrl()) + utils.CheckErrorWithTagAndMsg(err, "Failed to download istio!") + // Extract istio + utils.WaitPrintf("Extracting istio") + err = node.ExtractToDir(istioFilePath, "/usr/local", true) + utils.CheckErrorWithTagAndMsg(err, "Failed to extract istio!") + // Update PATH + err = node.AppendDirToPath("/usr/local/istio-%s/bin", node.Configs.Knative.IstioVersion) + utils.CheckErrorWithMsg(err, "Failed to update PATH!") + // Deploy istio operator + utils.WaitPrintf("Deploying istio operator") + operatorConfigPath, err := node.DownloadToTmpDir(node.Configs.Knative.IstioOperatorConfigUrl) + utils.CheckErrorWithMsg(err, "Failed to deploy istio operator!") + _, err = node.ExecShellCmd("/usr/local/istio-%s/bin/istioctl install -y -f %s", node.Configs.Knative.IstioVersion, operatorConfigPath) + utils.CheckErrorWithTagAndMsg(err, "Failed to deploy istio operator!") + + // Install Knative Serving component + utils.WaitPrintf("Installing Knative Serving component") + _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/serving/releases/download/knative-v%s/serving-crds.yaml", node.Configs.Knative.KnativeVersion) + utils.CheckErrorWithMsg(err, "Failed to install Knative Serving component!") + _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/serving/releases/download/knative-v%s/serving-core.yaml", node.Configs.Knative.KnativeVersion) + 
utils.CheckErrorWithTagAndMsg(err, "Failed to install Knative Serving component!") + + // Install local cluster registry + utils.WaitPrintf("Installing local cluster registry") + _, err = node.ExecShellCmd("kubectl create namespace registry") + utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!") + configFilePath, err := node.DownloadToTmpDir("%s", node.Configs.Knative.LocalRegistryVolumeConfigUrl) + utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!") + _, err = node.ExecShellCmd("REPO_VOL_SIZE=%s envsubst < %s | kubectl create --filename -", node.Configs.Knative.LocalRegistryRepoVolumeSize, configFilePath) + utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!") + _, err = node.ExecShellCmd("kubectl create -f %s && kubectl apply -f %s", node.Configs.Knative.LocalRegistryDockerRegistryConfigUrl, node.Configs.Knative.LocalRegistryHostUpdateConfigUrl) + utils.CheckErrorWithTagAndMsg(err, "Failed to install local cluster registry!") + + // Configure Magic DNS + utils.WaitPrintf("Configuring Magic DNS") + _, err = node.ExecShellCmd("kubectl apply -f %s", node.Configs.Knative.MagicDNSConfigUrl) + utils.CheckErrorWithTagAndMsg(err, "Failed to configure Magic DNS!") + + // Install networking layer + utils.WaitPrintf("Installing networking layer") + _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v%s/net-istio.yaml", node.Configs.Knative.KnativeVersion) + utils.CheckErrorWithTagAndMsg(err, "Failed to install networking layer!") + + // Logs for verification + _, err = node.ExecShellCmd("kubectl get pods -n knative-serving") + utils.CheckErrorWithMsg(err, "Verification Failed!") + + // // Configure DNS + // logs.WaitPrintf("Configuring DNS") + // _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/serving/releases/download/knative-v%s/serving-default-domain.yaml", node.Configs.Knative.KnativeVersion) + // 
logs.CheckErrorWithTagAndMsg(err, "Failed to configure DNS!") + + // enable node selector + utils.WaitPrintf("Enable node selector in knative serving") + _, err = node.ExecShellCmd(`kubectl patch cm config-features -n knative-serving \ + --type merge \ + -p '{"data":{"kubernetes.podspec-nodeselector":"enabled"}}' +`) + utils.CheckErrorWithTagAndMsg(err, "Failed to enable node selector in knative serving") + // node.enableNodeSelect() +} + +// Install Knative Eventing +func (node *Node) InstallKnativeEventing() { + // Install Knative Eventing component + utils.WaitPrintf("Installing Knative Eventing component") + _, err := node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/eventing-crds.yaml", node.Configs.Knative.KnativeVersion) + utils.CheckErrorWithMsg(err, "Failed to install Knative Eventing component!") + _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/eventing-core.yaml", node.Configs.Knative.KnativeVersion) + utils.CheckErrorWithTagAndMsg(err, "Failed to install Knative Eventing component!") + + // Logs for verification + _, err = node.ExecShellCmd("kubectl get pods -n knative-eventing") + utils.CheckErrorWithMsg(err, "Verification Failed!") + + // Install a default Channel (messaging) layer + utils.WaitPrintf("Installing a default Channel (messaging) layer") + _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/in-memory-channel.yaml", node.Configs.Knative.KnativeVersion) + utils.CheckErrorWithTagAndMsg(err, "Failed to install a default Channel (messaging) layer!") + + // Install a Broker layer + utils.WaitPrintf("Installing a Broker layer") + _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/mt-channel-broker.yaml", node.Configs.Knative.KnativeVersion) + utils.CheckErrorWithTagAndMsg(err, "Failed to install a Broker layer!") + 
+ // Logs for verification + _, err = node.ExecShellCmd("kubectl --namespace istio-system get service istio-ingressgateway") + utils.CheckErrorWithMsg(err, "Verification Failed!") +} + +// get istio download URL +func (node *Node) GetIstioDownloadUrl() string { + knative := node.Configs.Knative + return fmt.Sprintf(knative.IstioDownloadUrlTemplate, knative.IstioVersion, knative.IstioVersion, node.Configs.System.CurrentArch) +} diff --git a/scripts/openyurt-deployer/node/kube.go b/scripts/openyurt-deployer/node/kube.go new file mode 100644 index 000000000..821d135ac --- /dev/null +++ b/scripts/openyurt-deployer/node/kube.go @@ -0,0 +1,120 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/vhive-serverless/vHive/scripts/utils" +) + +// Initialize the master node of Kubernetes cluster +func (node *Node) KubeMasterInit() (string, string, string, string) { + + // Initialize + var err error + node.check_kube_environment() + node.CreateTmpDir() + defer node.CleanUpTmpDir() + + // Pre-pull Image + utils.WaitPrintf("Pre-Pulling required images") + shellCmd := fmt.Sprintf("sudo kubeadm config images pull --kubernetes-version %s ", node.Configs.Kube.K8sVersion) + if len(node.Configs.Kube.AlternativeImageRepo) > 0 { + shellCmd = fmt.Sprintf(shellCmd+"--image-repository %s ", node.Configs.Kube.AlternativeImageRepo) + } + _, err = node.ExecShellCmd(shellCmd) + utils.CheckErrorWithTagAndMsg(err, "Failed to pre-pull required images!\n") + + // Deploy Kubernetes + utils.WaitPrintf("Deploying Kubernetes(version %s)", node.Configs.Kube.K8sVersion) + shellCmd = fmt.Sprintf("sudo kubeadm init --kubernetes-version %s --pod-network-cidr=\"%s\" ", node.Configs.Kube.K8sVersion, node.Configs.Kube.PodNetworkCidr) + if len(node.Configs.Kube.AlternativeImageRepo) > 0 { + shellCmd = fmt.Sprintf(shellCmd+"--image-repository %s ", node.Configs.Kube.AlternativeImageRepo) + } + if len(node.Configs.Kube.ApiserverAdvertiseAddress) > 0 { + shellCmd = 
fmt.Sprintf(shellCmd+"--apiserver-advertise-address=%s ", node.Configs.Kube.ApiserverAdvertiseAddress) + } + shellCmd = fmt.Sprintf(shellCmd+"| tee %s/masterNodeInfo", node.Configs.System.TmpDir) + _, err = node.ExecShellCmd(shellCmd) + utils.CheckErrorWithTagAndMsg(err, "Failed to deploy Kubernetes(version %s)!\n", node.Configs.Kube.K8sVersion) + + // Make kubectl work for non-root user + utils.WaitPrintf("Making kubectl work for non-root user") + _, err = node.ExecShellCmd("mkdir -p %s/.kube && sudo cp -i /etc/kubernetes/admin.conf %s/.kube/config && sudo chown $(id -u):$(id -g) %s/.kube/config", + node.Configs.System.UserHomeDir, + node.Configs.System.UserHomeDir, + node.Configs.System.UserHomeDir) + utils.CheckErrorWithTagAndMsg(err, "Failed to make kubectl work for non-root user!\n") + + // Install Calico network add-on + utils.WaitPrintf("Installing pod network") + _, err = node.ExecShellCmd("kubectl apply -f %s", node.Configs.Kube.PodNetworkAddonConfigURL) + utils.CheckErrorWithTagAndMsg(err, "Failed to install pod network!\n") + + // Extract master node information from logs + utils.WaitPrintf("Extracting master node information from logs") + shellOut, err := node.ExecShellCmd("sed -n '/.*kubeadm join.*/p' < %s/masterNodeInfo | sed -n 's/.*join \\(.*\\):\\(\\S*\\) --token \\(\\S*\\).*/\\1 \\2 \\3/p'", node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to extract master node information from logs!\n") + splittedOut := strings.Split(shellOut, " ") + node.Configs.Kube.ApiserverAdvertiseAddress = splittedOut[0] + node.Configs.Kube.ApiserverPort = splittedOut[1] + node.Configs.Kube.ApiserverToken = splittedOut[2] + shellOut, err = node.ExecShellCmd("sed -n '/.*sha256:.*/p' < %s/masterNodeInfo | sed -n 's/.*\\(sha256:\\S*\\).*/\\1/p'", node.Configs.System.TmpDir) + utils.CheckErrorWithTagAndMsg(err, "Failed to extract master node information from logs!\n") + node.Configs.Kube.ApiserverTokenHash = shellOut + + return 
node.Configs.Kube.ApiserverAdvertiseAddress, + node.Configs.Kube.ApiserverPort, + node.Configs.Kube.ApiserverToken, + node.Configs.Kube.ApiserverTokenHash + +} + +func (node *Node) KubeClean() { + utils.InfoPrintf("Cleaning Kube in node: %s\n", node.Name) + var err error + if node.NodeRole == "master" { + // kubectl cordon {workerNodeName} + // kubectl drain {NodeName} --delete-local-data --force --ignore-daemonsets + // kubectl delete node {NodeName} + + utils.WaitPrintf("Reseting kube cluster and rm .kube file") + // TODO: delete master last, need to check defer can work or not + defer node.ExecShellCmd("sudo kubeadm reset -f && rm -rf $HOME/.kube && rm -rf /etc/cni/net.d") + // The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d + } else { + + utils.WaitPrintf("Reseting kube cluster") + _, err = node.ExecShellCmd("sudo kubeadm reset -f && rm -rf /etc/cni/net.d") + } + utils.CheckErrorWithTagAndMsg(err, "Failed to clean kube cluster!\n") + +} + +// Join worker node to Kubernetes cluster +func (node *Node) KubeWorkerJoin(apiServerAddr string, apiServerPort string, apiServerToken string, apiServerTokenHash string) { + + // Initialize + var err error + + // Join Kubernetes cluster + utils.WaitPrintf("Joining Kubernetes cluster") + _, err = node.ExecShellCmd("sudo kubeadm join %s:%s --token %s --discovery-token-ca-cert-hash %s", apiServerAddr, apiServerPort, apiServerToken, apiServerTokenHash) + utils.CheckErrorWithTagAndMsg(err, "Failed to join Kubernetes cluster!\n") +} + +func (node *Node) check_kube_environment() { + // Temporarily unused +} + +func (node *Node) GetAllNodes() []string { + utils.WaitPrintf("Get all nodes...") + if node.NodeRole != "master" { + utils.ErrorPrintf("GetAllNodes can only be executed on master node!\n") + return []string{} + } + out, err := node.ExecShellCmd("kubectl get nodes | awk 'NR>1 {print $1}'") + utils.CheckErrorWithMsg(err, "Failed to get nodes from cluster!\n") + nodeNames := 
strings.Split(out, "\n") + return nodeNames +} diff --git a/scripts/openyurt-deployer/node/node.go b/scripts/openyurt-deployer/node/node.go new file mode 100644 index 000000000..fb49a018b --- /dev/null +++ b/scripts/openyurt-deployer/node/node.go @@ -0,0 +1,51 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/sfreiberg/simplessh" + "github.com/vhive-serverless/vHive/scripts/utils" + "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/configs" +) + +type NodeConfig struct { + System configs.SystemEnvironmentStruct + Kube configs.KubeConfigStruct + Knative configs.KnativeConfigStruct + Yurt configs.YurtEnvironment + Demo configs.DemoEnvironment +} + +type Node struct { + Name string + Client *simplessh.Client + NodeRole string + Configs *NodeConfig +} + +func (node *Node) ExecShellCmd(cmd string, pars ...any) (string, error) { + shellCmd := fmt.Sprintf(cmd, pars...) + out, err := node.Client.Exec(shellCmd) + if err != nil { + utils.WarnPrintf("node: [%s] failed to exec: \n%s\nerror:%s\n", node.Name, shellCmd, out) + } + return strings.TrimSuffix(string(out), "\n"), err +} + +func (node *Node) OnlyExecByMaster() { + if node.NodeRole != "master" { + utils.FatalPrintf("This function can only be executed by master node!\n") + } +} + +func (node *Node) OnlyExecByWorker() { + if node.NodeRole == "master" { + utils.FatalPrintf("This function can only be executed by worker node!\n") + } +} + +func (node *Node) SetMasterAsCloud(asCloud bool) { + node.OnlyExecByMaster() + node.Configs.Yurt.MasterAsCloud = asCloud +} diff --git a/scripts/openyurt-deployer/node/system.go b/scripts/openyurt-deployer/node/system.go new file mode 100644 index 000000000..493df1054 --- /dev/null +++ b/scripts/openyurt-deployer/node/system.go @@ -0,0 +1,368 @@ +package node + +import ( + "fmt" + "path" + "strings" + + "github.com/vhive-serverless/vHive/scripts/utils" +) + +type ShellError struct { + msg string + exitCode int +} + +func (err *ShellError) Error() string { + 
return fmt.Sprintf("[exit %d] -> %s", err.exitCode, err.msg) +} + +// Detect current architecture +func (node *Node) DetectArch() { + utils.WaitPrintf("Detetcting current arch") + out, err := node.ExecShellCmd("dpkg --print-architecture") + utils.CheckErrorWithMsg(err, "Failed to get current arch!\n") + node.Configs.System.CurrentArch = out + switch node.Configs.System.CurrentArch { + default: + utils.InfoPrintf("Detected Arch: %s for node: %s\n", node.Configs.System.CurrentArch, node.Name) + } +} + +// Detect current operating system +func (node *Node) DetectOS() { + switch node.Configs.System.CurrentOS { + case "windows": + utils.FatalPrintf("Unsupported OS: %s\n", node.Configs.System.CurrentOS) + default: + var err error + node.Configs.System.CurrentOS, err = node.ExecShellCmd("sed -n 's/^NAME=\"\\(.*\\)\"/\\1/p' < /etc/os-release | head -1 | tr '[:upper:]' '[:lower:]'") + utils.InfoPrintf("Detected OS: %s\n", node.Configs.System.CurrentOS) + utils.CheckErrorWithMsg(err, "Failed to get Linux distribution info!\n") + switch node.Configs.System.CurrentOS { + case "ubuntu": + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + } + utils.InfoPrintf("Detected OS: %s for node: %s\n", strings.TrimSuffix(string(node.Configs.System.CurrentOS), "\n"), node.Name) + } +} + +// Get current directory +func (node *Node) GetCurrentDir() { + var err error + node.Configs.System.CurrentDir, err = node.ExecShellCmd("pwd") + utils.CheckErrorWithMsg(err, "Failed to get get current directory!\n") +} + +// Get current home directory +func (node *Node) GetUserHomeDir() { + var err error + node.Configs.System.UserHomeDir, err = node.ExecShellCmd("echo $HOME") + utils.CheckErrorWithMsg(err, "Failed to get current home directory!\n") +} + +// Get current node's hostname +func (node *Node) GetNodeHostName() { + var err error + node.Configs.System.NodeHostName, err = node.ExecShellCmd("echo $HOSTNAME") + utils.CheckErrorWithMsg(err, "Failed 
to get current node hostname!\n") +} + +// Create temporary directory +func (node *Node) CreateTmpDir() { + var err error + utils.InfoPrintf("Creating temporary directory") + tmpDir := "~/yurt_tmp" + _, err = node.ExecShellCmd("mkdir -p %s", tmpDir) + node.Configs.System.TmpDir = tmpDir + utils.CheckErrorWithTagAndMsg(err, "Failed to create temporary directory!\n") +} + +// Clean up temporary directory +func (node *Node) CleanUpTmpDir() { + utils.InfoPrintf("Cleaning up temporary directory") + _, err := node.ExecShellCmd("rm -rf %s/*", node.Configs.System.TmpDir) + utils.CheckErrorWithTagAndMsg(err, "Failed to create temporary directory!\n") +} + +// Extract arhive file to specific directory(currently support .tar.gz file only) +func (node *Node) ExtractToDir(filePath string, dirPath string, privileged bool) error { + var err error + if privileged { + _, err = node.ExecShellCmd("sudo tar -xzvf %s -C %s", filePath, dirPath) + } else { + _, err = node.ExecShellCmd("tar -xzvf %s -C %s", filePath, dirPath) + } + return err +} + +// Append directory to PATH variable for bash & zsh +func (node *Node) AppendDirToPath(pathTemplate string, pars ...any) error { + appendedPath := fmt.Sprintf(pathTemplate, pars...) 
+ + // For bash + _, err := node.ExecShellCmd("echo 'export PATH=$PATH:%s' >> %s/.bashrc", appendedPath, node.Configs.System.UserHomeDir) + if err != nil { + return err + } + // For zsh + _, err = node.LookPath("zsh") + if err != nil { + _, err = node.ExecShellCmd("echo 'export PATH=$PATH:%s' >> %s/.zshrc", appendedPath, node.Configs.System.UserHomeDir) + } + return err +} + +// Turn off unattended-upgrades +func (node *Node) TurnOffAutomaticUpgrade() (string, error) { + switch node.Configs.System.CurrentOS { + case "ubuntu": + _, err := node.ExecShellCmd("stat /etc/apt/apt.conf.d/20auto-upgrades") + if err == nil { + return node.ExecShellCmd("sudo sed -i 's/\"1\"/\"0\"/g' /etc/apt/apt.conf.d/20auto-upgrades") + } + return "", nil + default: + return "", nil + } +} + +// Install packages on various OS +func (node *Node) InstallPackages(packagesTemplate string, pars ...any) error { + packages := fmt.Sprintf(packagesTemplate, pars...) + switch node.Configs.System.CurrentOS { + case "ubuntu": + _, err := node.ExecShellCmd("sudo apt-get -qq update && sudo apt-get -qq install -y --allow-downgrades --allow-change-held-packages %s", packages) + return err + case "centos": + _, err := node.ExecShellCmd("sudo dnf -y -q install %s", packages) + return err + case "rocky linux": + _, err := node.ExecShellCmd("sudo dnf -y -q install %s", packages) + return err + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + return &ShellError{msg: "Unsupported Linux distribution", exitCode: 1} + } +} + +// Download file to temporary directory (absolute path of downloaded file will be the first return value if successful) +func (node *Node) DownloadToTmpDir(urlTemplate string, pars ...any) (string, error) { + url := fmt.Sprintf(urlTemplate, pars...) 
+ fileName := path.Base(url) + filePath := node.Configs.System.TmpDir + "/" + fileName + _, err := node.ExecShellCmd("curl -sSL --output %s %s", filePath, url) + return filePath, err +} + +func (node *Node) LookPath(path string) (string, error) { + return node.ExecShellCmd("command -v %s", path) +} + +// Check system environment +func (node *Node) CheckSystemEnvironment() { + // Check system environment + utils.InfoPrintf("Checking system environment...\n") + var err error + + // Check Golang + _, err = node.LookPath("go") + if err != nil { + utils.InfoPrintf("Golang not found! Golang(version %s) will be automatically installed!\n", node.Configs.System.GoVersion) + } else { + utils.InfoPrintf("Golang found!\n") + node.Configs.System.GoInstalled = true + } + + // Check Containerd + _, err = node.LookPath("containerd") + if err != nil { + utils.InfoPrintf("Containerd not found! containerd(version %s) will be automatically installed!\n", node.Configs.System.ContainerdVersion) + } else { + utils.InfoPrintf("Containerd found!\n") + node.Configs.System.ContainerdInstalled = true + } + + // Check runc + _, err = node.LookPath("runc") + if err != nil { + utils.InfoPrintf("runc not found! runc(version %s) will be automatically installed!\n", node.Configs.System.RuncVersion) + } else { + utils.InfoPrintf("runc found!\n") + node.Configs.System.RuncInstalled = true + } + + // Check CNI plugins + _, err = node.ExecShellCmd("stat /opt/cni/bin") + if err != nil { + utils.InfoPrintf("CNI plugins not found! 
CNI plugins(version %s) will be automatically installed!\n", node.Configs.System.CniPluginsVersion) + } else { + utils.InfoPrintf("CNI plugins found!\n") + node.Configs.System.CniPluginsInstalled = true + } + + // Add OS-specific dependencies to installation lists + switch node.Configs.System.CurrentOS { + case "ubuntu": + node.Configs.System.Dependencies = "git wget curl build-essential apt-transport-https ca-certificates" + case "rocky linux": + node.Configs.System.Dependencies = "" + case "centos": + node.Configs.System.Dependencies = "" + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + } + + utils.InfoPrintf("Finish checking system environment!\n") +} + +func (node *Node) ReadSystemInfo() { + node.DetectOS() + node.DetectArch() + node.GetCurrentDir() + node.GetUserHomeDir() + node.GetNodeHostName() + node.CheckSystemEnvironment() +} + +// Initialize system environment +func (node *Node) SystemInit() { + utils.InfoPrintf("Start init system environment for node:%s\n", node.Name) + // Initialize + + var err error + + // node.ReadSystemInfo() // technically, this is not necessary + node.CreateTmpDir() + defer node.CleanUpTmpDir() + + // Turn off unattended-upgrades on ubuntu + utils.InfoPrintf("Turning off automatic upgrade") + _, err = node.TurnOffAutomaticUpgrade() + utils.CheckErrorWithTagAndMsg(err, "Failed to turn off automatic upgrade!\n") + + // Disable swap + utils.InfoPrintf("Disabling swap") + _, err = node.ExecShellCmd("sudo swapoff -a && sudo cp /etc/fstab /etc/fstab.old") // Turn off Swap && Backup fstab file + utils.CheckErrorWithTagAndMsg(err, "Failed to disable swap!\n") + + utils.InfoPrintf("Modifying fstab") + // Modify fstab to disable swap permanently + _, err = node.ExecShellCmd("sudo sed -i 's/#\\s*\\(.*swap.*\\)/\\1/g' /etc/fstab && sudo sed -i 's/.*swap.*/# &/g' /etc/fstab") + utils.CheckErrorWithTagAndMsg(err, "Failed to dodify fstab!\n") + + // Install dependencies + 
utils.InfoPrintf("Installing dependencies") + err = node.InstallPackages(node.Configs.System.Dependencies) + utils.CheckErrorWithTagAndMsg(err, "Failed to install dependencies!\n") + + // Install Golang + if !node.Configs.System.GoInstalled { + // Download & Extract Golang + utils.InfoPrintf("Downloading Golang(ver %s)", node.Configs.System.GoVersion) + filePathName, err := node.DownloadToTmpDir(node.Configs.System.GoDownloadUrlTemplate, node.Configs.System.GoVersion, node.Configs.System.CurrentArch) + utils.CheckErrorWithTagAndMsg(err, "Failed to download Golang(ver %s)!\n", node.Configs.System.GoVersion) + utils.InfoPrintf("Extracting Golang") + _, err = node.ExecShellCmd("sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf %s", filePathName) + utils.CheckErrorWithTagAndMsg(err, "Failed to extract Golang!\n") + + // For bash + _, err = node.ExecShellCmd("echo 'export PATH=$PATH:/usr/local/go/bin' >> %s/.bashrc", node.Configs.System.UserHomeDir) + utils.CheckErrorWithMsg(err, "Failed to update PATH!\n") + // For zsh + _, err = node.LookPath("zsh") + if err != nil { + _, err = node.ExecShellCmd("echo 'export PATH=$PATH:/usr/local/go/bin' >> %s/.zshrc", node.Configs.System.UserHomeDir) + utils.CheckErrorWithMsg(err, "Failed to update PATH!\n") + } + } + + // Install containerd + if !node.Configs.System.ContainerdInstalled { + // Download containerd + utils.InfoPrintf("Downloading containerd(ver %s)", node.Configs.System.ContainerdVersion) + filePathName, err := node.DownloadToTmpDir( + node.Configs.System.ContainerdDownloadUrlTemplate, + node.Configs.System.ContainerdVersion, + node.Configs.System.ContainerdVersion, + node.Configs.System.CurrentArch) + utils.CheckErrorWithTagAndMsg(err, "Failed to Download containerd(ver %s)\n", node.Configs.System.ContainerdVersion) + // Extract containerd + utils.InfoPrintf("Extracting containerd") + _, err = node.ExecShellCmd("sudo tar Cxzvf /usr/local %s", filePathName) + utils.CheckErrorWithTagAndMsg(err, "Failed to 
extract containerd!\n") + // Start containerd via systemd + utils.InfoPrintf("Downloading systemd profile for containerd") + filePathName, err = node.DownloadToTmpDir(node.Configs.System.ContainerdSystemdProfileDownloadUrl) + utils.CheckErrorWithTagAndMsg(err, "Failed to download systemd profile for containerd!\n") + utils.InfoPrintf("Starting containerd via systemd") + _, err = node.ExecShellCmd("sudo cp %s /lib/systemd/system/ && sudo systemctl daemon-reload && sudo systemctl enable --now containerd", filePathName) + utils.CheckErrorWithTagAndMsg(err, "Failed to start containerd via systemd!\n") + } + + // Install runc + if !node.Configs.System.RuncInstalled { + // Download runc + utils.InfoPrintf("Downloading runc(ver %s)", node.Configs.System.RuncVersion) + filePathName, err := node.DownloadToTmpDir( + node.Configs.System.RuncDownloadUrlTemplate, + node.Configs.System.RuncVersion, + node.Configs.System.CurrentArch) + utils.CheckErrorWithTagAndMsg(err, "Failed to download runc(ver %s)!\n", node.Configs.System.RuncVersion) + // Install runc + utils.InfoPrintf("Installing runc") + _, err = node.ExecShellCmd("sudo install -m 755 %s /usr/local/sbin/runc", filePathName) + utils.CheckErrorWithTagAndMsg(err, "Failed to install runc!\n") + } + + // Install CNI plugins + if !node.Configs.System.CniPluginsInstalled { + utils.InfoPrintf("Downloading CNI plugins(ver %s)", node.Configs.System.CniPluginsVersion) + filePathName, err := node.DownloadToTmpDir( + node.Configs.System.CniPluginsDownloadUrlTemplate, + node.Configs.System.CniPluginsVersion, + node.Configs.System.CurrentArch, + node.Configs.System.CniPluginsVersion) + utils.CheckErrorWithTagAndMsg(err, "Failed to download CNI plugins(ver %s)!\n", node.Configs.System.CniPluginsVersion) + utils.InfoPrintf("Extracting CNI plugins") + _, err = node.ExecShellCmd("sudo mkdir -p /opt/cni/bin && sudo tar Cxzvf /opt/cni/bin %s", filePathName) + utils.CheckErrorWithTagAndMsg(err, "Failed to extract CNI plugins!\n") + } + + // 
Configure the systemd cgroup driver + utils.InfoPrintf("Configuring the systemd cgroup driver") + _, err = node.ExecShellCmd( + "containerd config default > %s && sudo mkdir -p /etc/containerd && sudo cp %s /etc/containerd/config.toml && sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml && sudo systemctl restart containerd", + node.Configs.System.TmpDir+"/config.toml", + node.Configs.System.TmpDir+"/config.toml") + utils.CheckErrorWithTagAndMsg(err, "Failed to configure the systemd cgroup driver!\n") + + // Enable IP forwading & br_netfilter + utils.InfoPrintf("Enabling IP forwading & br_netfilter") + _, err = node.ExecShellCmd("sudo modprobe br_netfilter && sudo modprobe overlay && sudo sysctl -w net.ipv4.ip_forward=1 && sudo sysctl -w net.ipv4.conf.all.forwarding=1 && sudo sysctl -w net.bridge.bridge-nf-call-iptables=1 && sudo sysctl -w net.bridge.bridge-nf-call-ip6tables=1") + utils.CheckErrorWithTagAndMsg(err, "Failed to enable IP forwading & br_netfilter!\n") + // Ensure Boot-Resistant + utils.InfoPrintf("Ensuring Boot-Resistant") + _, err = node.ExecShellCmd("echo 'br_netfilter' | sudo tee /etc/modules-load.d/netfilter.conf && echo 'overlay' | sudo tee -a /etc/modules-load.d/netfilter.conf && sudo sed -i 's/# *net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf && sudo sed -i 's/net.ipv4.ip_forward=0/net.ipv4.ip_forward=1/g' /etc/sysctl.conf && echo 'net.bridge.bridge-nf-call-iptables=1\nnet.bridge.bridge-nf-call-ip6tables=1\nnet.ipv4.conf.all.forwarding=1' | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf") + utils.CheckErrorWithTagAndMsg(err, "Failed to ensure Boot-Resistant!\n") + + // Install kubeadm, kubelet, kubectl + switch node.Configs.System.CurrentOS { + case "ubuntu": + // Download Google Cloud public signing key and Add the Kubernetes apt repository + utils.InfoPrintf("Adding the Kubernetes apt repository") + _, err = node.ExecShellCmd("sudo mkdir -p /etc/apt/keyrings && sudo curl -fsSLo 
/etc/apt/keyrings/kubernetes-archive-keyring.gpg https://dl.k8s.io/apt/doc/apt-key.gpg && echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list") + utils.CheckErrorWithTagAndMsg(err, "Failed to add the Kubernetes apt repository!\n") + // Install kubeadm, kubelet, kubectl via apt + utils.InfoPrintf("Installing kubeadm, kubelet, kubectl") + err = node.InstallPackages("kubeadm=%s kubelet=%s kubectl=%s", node.Configs.System.KubeVersion, node.Configs.System.KubeVersion, node.Configs.System.KubeVersion) + utils.CheckErrorWithTagAndMsg(err, "Failed to install kubeadm, kubelet, kubectl!\n") + // Lock kubeadm, kubelet, kubectl version + utils.InfoPrintf("Locking kubeadm, kubelet, kubectl version") + _, err = node.ExecShellCmd("sudo apt-mark hold kubelet kubeadm kubectl") + utils.CheckErrorWithTagAndMsg(err, "Failed to lock kubeadm, kubelet, kubectl version!\n") + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + } +} diff --git a/scripts/openyurt-deployer/openyurt_deployer b/scripts/openyurt-deployer/openyurt_deployer new file mode 100755 index 000000000..6d05d8931 Binary files /dev/null and b/scripts/openyurt-deployer/openyurt_deployer differ diff --git a/scripts/openyurt-deployer/template/benchmarkTemplate.go b/scripts/openyurt-deployer/template/benchmarkTemplate.go new file mode 100644 index 000000000..d362117bd --- /dev/null +++ b/scripts/openyurt-deployer/template/benchmarkTemplate.go @@ -0,0 +1,24 @@ +package template + +// adopted from vSwarm[https://github.com/vhive-serverless/vSwarm/blob/main/benchmarks/aes/yamls/knative/kn-aes-python.yaml] +const ( + benchmarkTemplate = `echo 'apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld-python-%s + namespace: default +spec: + template: + spec: + nodeSelector: + apps.openyurt.io/nodepool: %s + containers: + - image: 
docker.io/vhiveease/hello-%s:latest + ports: + - name: h2c + containerPort: 50000' > %s` +) + +func GetBenchmarkTemplate() string { + return benchmarkTemplate +} diff --git a/scripts/openyurt-deployer/template/cloudNodePoolTemplate.go b/scripts/openyurt-deployer/template/cloudNodePoolTemplate.go new file mode 100644 index 000000000..90d255146 --- /dev/null +++ b/scripts/openyurt-deployer/template/cloudNodePoolTemplate.go @@ -0,0 +1,14 @@ +package template + +const ( + cloudTemplate = `echo 'apiVersion: apps.openyurt.io/v1beta1 +kind: NodePool +metadata: + name: %s +spec: + type: Cloud' > %s` +) + +func CreateCloudNpTemplate() string { + return cloudTemplate +} diff --git a/scripts/openyurt-deployer/template/edgeNodePoolTemplate.go b/scripts/openyurt-deployer/template/edgeNodePoolTemplate.go new file mode 100644 index 000000000..6f7a7c337 --- /dev/null +++ b/scripts/openyurt-deployer/template/edgeNodePoolTemplate.go @@ -0,0 +1,14 @@ +package template + +const ( + edgeTemplate = `echo 'apiVersion: apps.openyurt.io/v1beta1 +kind: NodePool +metadata: + name: %s +spec: + type: Edge' > %s` +) + +func CreateEdgeNpTemplate() string { + return edgeTemplate +} diff --git a/scripts/openyurt-deployer/template/go.mod b/scripts/openyurt-deployer/template/go.mod new file mode 100644 index 000000000..8d6ec5fe1 --- /dev/null +++ b/scripts/openyurt-deployer/template/go.mod @@ -0,0 +1,3 @@ +module github.com/vhive-serverless/vhive/scripts/openyurt_deployer/template + +go 1.20 \ No newline at end of file diff --git a/scripts/openyurt-deployer/template/kubeTemplate.go b/scripts/openyurt-deployer/template/kubeTemplate.go new file mode 100644 index 000000000..ab6530b96 --- /dev/null +++ b/scripts/openyurt-deployer/template/kubeTemplate.go @@ -0,0 +1,26 @@ +package template + +const ( + kubeletTemplate = `apiVersion: v1 +clusters: +- cluster: + server: http://127.0.0.1:10261 + name: default-cluster +contexts: +- context: + cluster: default-cluster + namespace: default + user: default-auth 
+ name: default-context +current-context: default-context +kind: Config +preferences: {}` +) + +func GetKubeletConfig() string { + return kubeletTemplate +} + +func GetNetworkAddonConfigURL() string { + return vHiveConfigsURL + "/calico/canal.yaml" +} diff --git a/scripts/openyurt-deployer/template/shellTemplate.go b/scripts/openyurt-deployer/template/shellTemplate.go new file mode 100644 index 000000000..5fd995a86 --- /dev/null +++ b/scripts/openyurt-deployer/template/shellTemplate.go @@ -0,0 +1,20 @@ +package template + +const ( + restartPodsShellTemplate = `existingPods=$(kubectl get pod -A -o wide | grep %s) +originalIFS=${IFS} +IFS=$'\n' +while read -r pod +do + if [ -z "$(echo ${pod} | sed -n "/.*yurt-hub.*/p")" ]; then + podNameSpace=$(echo ${pod} | sed -n "s/\s*\(\S*\)\s*\(\S*\).*/\1/p") + podName=$(echo ${pod} | sed -n "s/\s*\(\S*\)\s*\(\S*\).*/\2/p") + echo "${podNameSpace} ${podName}" + fi +done <<< ${existingPods} +IFS=${originalIFS}` +) + +func GetRestartPodsShell() string { + return restartPodsShellTemplate +} diff --git a/scripts/openyurt-deployer/template/template.go b/scripts/openyurt-deployer/template/template.go new file mode 100644 index 000000000..1c73d9cc5 --- /dev/null +++ b/scripts/openyurt-deployer/template/template.go @@ -0,0 +1,3 @@ +package template + +var vHiveConfigsURL = "https://raw.githubusercontent.com/anshalshukla/vHive/release-1.9/configs" diff --git a/scripts/openyurt-deployer/utils.go b/scripts/openyurt-deployer/utils.go new file mode 100644 index 000000000..986a43b3e --- /dev/null +++ b/scripts/openyurt-deployer/utils.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/sfreiberg/simplessh" + "github.com/vhive-serverless/vHive/scripts/utils" +) + +func SetupSSHConn(nodeName string) *simplessh.Client { + utils.InfoPrintf("Connecting to %s\n", nodeName) + splits := strings.Split(nodeName, "@") + username := splits[0] + host := splits[1] + client, err := simplessh.ConnectWithAgent(host, username) + if 
err != nil { + utils.FatalPrintf("Failed to connect to: %s:%s\n", nodeName, err) + } + return client +} + +type ShellError struct { + msg string + exitCode int +} + +func (err *ShellError) Error() string { + return fmt.Sprintf("[exit %d] -> %s", err.exitCode, err.msg) +}