diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 122cc103f..1157ba76e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -7,7 +7,7 @@ on: - 'docs/**' - '**.md' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/build_setup.yml b/.github/workflows/build_setup.yml index 9c3caf214..0d113be06 100644 --- a/.github/workflows/build_setup.yml +++ b/.github/workflows/build_setup.yml @@ -7,7 +7,7 @@ on: - 'docs/**' - '**.md' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index e4edb98e9..ea64d3aa2 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -16,7 +16,7 @@ on: branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] pull_request: # The branches below must be a subset of the branches above - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] workflow_dispatch: schedule: - cron: '40 13 * * 2' diff --git a/.github/workflows/firecracker_cri_tests.yml b/.github/workflows/firecracker_cri_tests.yml index 5286c4d1b..303ba5e4f 100644 --- a/.github/workflows/firecracker_cri_tests.yml +++ b/.github/workflows/firecracker_cri_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/gvisor_cri_tests.yml b/.github/workflows/gvisor_cri_tests.yml index 
33d1a77a0..237368072 100644 --- a/.github/workflows/gvisor_cri_tests.yml +++ b/.github/workflows/gvisor_cri_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index 049920c20..4931d99ca 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml index f0a5bee8d..119179ff6 100644 --- a/.github/workflows/linters.yml +++ b/.github/workflows/linters.yml @@ -3,7 +3,7 @@ on: push: branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] jobs: build: diff --git a/.github/workflows/openyurt-unit-test.yml b/.github/workflows/openyurt-unit-test.yml new file mode 100644 index 000000000..b25b650c1 --- /dev/null +++ b/.github/workflows/openyurt-unit-test.yml @@ -0,0 +1,41 @@ +name: Build and Test OpenYurt Deployer + +on: + push: + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + paths-ignore: + - 'docs/**' + - '**.md' + pull_request: + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] + paths-ignore: + - 'docs/**' + - '**.md' + workflow_dispatch: + +env: + GOOS: linux + GO111MODULE: on + +jobs: + openyurt-unit-test: + runs-on: ubuntu-latest + + steps: + - name: Set up Go 1.19 + uses: actions/setup-go@v4 + with: + go-version: '1.19' + + - 
name: Check out the code + uses: actions/checkout@v4 + + - name: Build scripts + run: + pushd scripts/openyurt-deployer && go build -o oy_deploy && popd + + - name: Run Unit Test on OpenYurt helper function + run: | + cd scripts/openyurt-deployer + chmod +x unit_test_workflow.sh + ./unit_test_workflow.sh diff --git a/.github/workflows/stargz_tests.yml b/.github/workflows/stargz_tests.yml index 63b110475..b2173783a 100644 --- a/.github/workflows/stargz_tests.yml +++ b/.github/workflows/stargz_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] paths-ignore: - 'docs/**' - '**.md' diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 0ff29ca02..a81e8843e 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -8,7 +8,7 @@ on: - '**.md' - 'function-images/**' pull_request: - branches: [ main, legacy-firecracker-v0.24.0-with-upf-support, openyurt ] + branches: [ main, legacy-firecracker-v0.24.0-with-upf-support ] paths-ignore: - 'docs/**' - '**.md' diff --git a/README.md b/README.md index 531eed295..043977dc6 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ including functions autoscaling and cold-start delay optimization with several s vHive has added support for the state-of-the-art extension [eStargz](https://github.com/containerd/stargz-snapshotter) to container layers and lazy pull support for container images. +vHive has added support for [`OpenYurt`](https://openyurt.io/), an open platform that extends upstream Kubernetes to the edge. It will assist in node management for cloud and edge nodes and can be deployed with [`openyurt-deployer`](scripts/openyurt-deployer/README.md). 
## vHive architecture diff --git a/scripts/openyurt-deployer/README.md b/scripts/openyurt-deployer/README.md index 10b79f0c8..08be06006 100644 --- a/scripts/openyurt-deployer/README.md +++ b/scripts/openyurt-deployer/README.md @@ -1,11 +1,26 @@ # Quick set-up `OpenYurt` -## 1. Introduction +## 1.1 Introduction This program extends [`EasyOpenyurt`](https://github.com/flyinghorse0510/easy_openyurt) to automate the set up process of an `OpenYurt` cluster. It support setting up a Kubernetes cluster using kubeadm and then deploy `OpenYurt` and Knative on it. It is compatible with vHive stock-only mode. +## 1.2 About [`OpenYurt`](https://openyurt.io/docs/#:~:text=Furthermore%2C%20OpenYurt%20enhances%20node%20reliability,node%20heartbeats%20to%20the%20cloud.) + +### Key Features + +#### 1. Powerful edge autonomy capability +`OpenYurt` addresses this issue by implementing a per-node proxy (`YurtHub`) along with local storage to cache the state of the cloud apiserver. Consequently, when a node loses its connection, the cached states remain accessible to Kubelet, `KubeProxy`, and any user Pods. + +#### 2. Cross `NodePool` network communication capability + +In an edge computing Kubernetes cluster, nodes are often distributed across various geographical regions. Consequently, when relying on a native Container Network Interface (CNI) solution, Pods within different `NodePools` may be unable to communicate using Pod IP, Service IP, or Node IP, particularly if each `NodePool` resides within its own isolated LAN. Raven offers a networking solution that enables `cross-NodePool` communication within an `OpenYurt` cluster. + +#### 3. `Multi-NodePool` Management + +In order to manage applications and traffic in multiple node pools conveniently, `YurtAppSet` and `YurtAppDaemon` are introduced for managing workloads in `multi-nodepool`, and service topology capability for routing traffic in node pool. + ## 2. 
Brief overview **Prerequisite of nodes:** @@ -19,8 +34,8 @@ It support setting up a Kubernetes cluster using kubeadm and then deploy `OpenYu | :----------: | :---: | | main.go | script entry point | | `conf.json` | json files that stores cluster's configuration | -| node | executing commands on remote nodes through ssh | -| configs | node runtime configurations | +| node.go | node structure and related functions for Section 3.2 | +| configs | yaml templates | **Description** @@ -30,9 +45,7 @@ It support setting up a Kubernetes cluster using kubeadm and then deploy `OpenYu 4. (Optional) Deploy Knative (vHive stock-only mode compatible) ## 3. Usage -```bash -./openyurt_deployer deploy # deploy openyurt on the cluster -``` + ```bash ./openyurt_deployer clean # clean the openyurt cluster and restore it to initial state ``` @@ -56,24 +69,38 @@ It support setting up a Kubernetes cluster using kubeadm and then deploy `OpenYu ### 3.2 Run Script +#### 3.2.1 To set up the Kubernetes cluster ```bash go build . -./openyurt_deployer deploy -``` -If it gives out error like: -``` -FATA[0001] Failed to connect to: username@host -``` -Please execute: -``` -eval `ssh-agent -s` && ssh-add ~/.ssh/ -``` -For example: -``` -eval `ssh-agent -s` && ssh-add ~/.ssh/id_rsa +./openyurt_deployer k8s +``` +--- +**Note:** +> If it gives out error like: +> ``` +> FATA[0001] Failed to connect to: username@host +> ``` +> Please execute: +> ``` +> eval `ssh-agent -s` && ssh-add ~/.ssh/ +> ``` +> For example: +> ``` +> eval `ssh-agent -s` && ssh-add ~/.ssh/id_rsa +> ``` +> And try again + +--- + +#### 3.2.2 To set up Knative +```bash +./openyurt_deployer knative ``` -And try again +#### 3.2.3 To set up Open Yurt +```bash +./openyurt_deployer openyurt +``` ## 4. 
Demo: Create `NodePool` And Deploy service on it **Referenced from [`OpenYurt`](https://openyurt.io/docs/user-manuals/workload/node-pool-management)* @@ -82,31 +109,27 @@ The demo would deploy a helloworld function to cloud node pool or edge node pool Deploy the demo: ``` -./openyurt_deployer demo-c +./openyurt_deployer demo-deploy-on-cloud ``` or: ``` -./openyurt_deployer demo-e +./openyurt_deployer demo-deploy-on-edge ``` -where `demo-c` would deploy the service to the cloud node pool and `demo-e` would deploy the service to the edge node pool. +where `demo-deploy-on-cloud` would deploy the service to the cloud node pool and `demo-deploy-on-edge` would deploy the service to the edge node pool. The demo code will also show information about node pool after deployment. -The name for `demo-c` would be `helloworld-cloud`, while the name for `demo-e` would be `helloworld-edge` +The name for `demo-deploy-on-cloud` would be `helloworld-cloud`, while the name for `demo-deploy-on-edge` would be `helloworld-edge` It will also show the services' `URL` so you can try to invoke it on the master node. You can check the node pool information simply by: ``` ./openyurt_deployer demo-print ``` -Or delete the services deployed on node pool by: -``` -./openyurt_deployer demo-clear -``` The demo code will also show information about node pool after deployment. ### 4.1 Invoke the Services (Optional) -You can try to invoke the services created by `demo-c` or `demo-e` on master node. +You can try to invoke the services created by `demo-deploy-on-cloud` or `demo-deploy-on-edge` on master node. First, ssh to master node, following commands should all be executed on master node. 
``` ssh @ diff --git a/scripts/openyurt-deployer/configs/benchmarkTemplate.yaml b/scripts/openyurt-deployer/configs/benchmarkTemplate.yaml new file mode 100644 index 000000000..0b02ee801 --- /dev/null +++ b/scripts/openyurt-deployer/configs/benchmarkTemplate.yaml @@ -0,0 +1,15 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld-python-isCloud + namespace: default +spec: + template: + spec: + nodeSelector: + apps.openyurt.io/nodepool: poolName + containers: + - image: docker.io/vhiveease/hello-isCloud:latest + ports: + - name: h2c + containerPort: 50000 \ No newline at end of file diff --git a/scripts/openyurt-deployer/configs/cloudNodePoolTemplate.yaml b/scripts/openyurt-deployer/configs/cloudNodePoolTemplate.yaml new file mode 100644 index 000000000..973dd3262 --- /dev/null +++ b/scripts/openyurt-deployer/configs/cloudNodePoolTemplate.yaml @@ -0,0 +1,6 @@ +apiVersion: apps.openyurt.io/v1beta1 +kind: NodePool +metadata: + name: poolName +spec: + type: Cloud \ No newline at end of file diff --git a/scripts/openyurt-deployer/configs/configs.go b/scripts/openyurt-deployer/configs/configs.go deleted file mode 100644 index 64bd01fd6..000000000 --- a/scripts/openyurt-deployer/configs/configs.go +++ /dev/null @@ -1,91 +0,0 @@ -package configs - -import ( - "encoding/json" - "io" - "os" - "path" - - utils "github.com/vhive-serverless/vHive/scripts/utils" -) - -// Decode specific config files (JSON format) -func DecodeConfig(configFilePath string, configStruct interface{}) error { - // Open & read the config file - configFile, err := os.Open(configFilePath) - if err != nil { - return err - } - defer configFile.Close() - - // Read file content - configContent, err := io.ReadAll(configFile) - if err != nil { - return err - } - - // Decode json into struct - err = json.Unmarshal(configContent, configStruct) - - return err - -} - -// Load knative config files -func (knative *KnativeConfigStruct) LoadConfig() error { - var err error - // Check 
config directory - if len(VHive.VHiveSetupConfigPath) == 0 { - VHive.VHiveSetupConfigPath, err = utils.GetVHiveFilePath("configs/setup") - if err != nil { - utils.CleanEnvironment() - os.Exit(1) - } - } - // Get the (absolute) path of the config file - configFilePath := path.Join(VHive.VHiveSetupConfigPath, "knative.json") - - // Decode json into struct - err = DecodeConfig(configFilePath, knative) - - return err - -} - -// Load kubernetes config files -func (kube *KubeConfigStruct) LoadConfig() error { - // Get the (absolute) path of the config file - configFilePath := path.Join(VHive.VHiveSetupConfigPath, "kube.json") - - // Decode json into struct - err := DecodeConfig(configFilePath, kube) - - return err -} - -// Load system config files -func (system *SystemEnvironmentStruct) LoadConfig() error { - // Get the (absolute) path of the config file - configFilePath := path.Join(VHive.VHiveSetupConfigPath, "system.json") - - // Decode json into struct - err := DecodeConfig(configFilePath, system) - - return err -} - -// Load vHive config files -func (vhive *VHiveConfigStruct) LoadConfig() error { - // Get the (absolute) path of the config file - configFilePath := path.Join(VHive.VHiveSetupConfigPath, "vhive.json") - - // Decode json into struct - err := DecodeConfig(configFilePath, vhive) - - return err - -} - -const ( - Version = "0.2.4b" // Version Info -) diff --git a/scripts/openyurt-deployer/configs/demo.go b/scripts/openyurt-deployer/configs/demo.go deleted file mode 100644 index bb43769e9..000000000 --- a/scripts/openyurt-deployer/configs/demo.go +++ /dev/null @@ -1,21 +0,0 @@ -package configs - -type DemoEnvironment struct { - CloudYamlFile string - EdgeYamlFile string - CloudBenchYamlFile string - EdgeBenchYamlFile string - YurtAppSetYamlFile string - CloudPoolName string - EdgePoolName string -} - -var Demo = DemoEnvironment{ - CloudYamlFile: "cloud.yaml", - EdgeYamlFile: "edge.yaml", - CloudBenchYamlFile: "cloud-bench.yaml", - EdgeBenchYamlFile: 
"edge-bench.yaml", - YurtAppSetYamlFile: "yurt.yaml", - CloudPoolName: "cloud", - EdgePoolName: "edge", -} diff --git a/scripts/openyurt-deployer/configs/edgeNodePoolTemplate.yaml b/scripts/openyurt-deployer/configs/edgeNodePoolTemplate.yaml new file mode 100644 index 000000000..0dc5fbf98 --- /dev/null +++ b/scripts/openyurt-deployer/configs/edgeNodePoolTemplate.yaml @@ -0,0 +1,6 @@ +apiVersion: apps.openyurt.io/v1beta1 +kind: NodePool +metadata: + name: poolName +spec: + type: Edge \ No newline at end of file diff --git a/scripts/openyurt-deployer/configs/go.mod b/scripts/openyurt-deployer/configs/go.mod deleted file mode 100644 index e19ce9147..000000000 --- a/scripts/openyurt-deployer/configs/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/vhive-serverless/vhive/scripts/openyurt_deployer/configs - -go 1.20 \ No newline at end of file diff --git a/scripts/openyurt-deployer/configs/knative.go b/scripts/openyurt-deployer/configs/knative.go deleted file mode 100644 index 557cb0685..000000000 --- a/scripts/openyurt-deployer/configs/knative.go +++ /dev/null @@ -1,35 +0,0 @@ -package configs - -import "fmt" - -type KnativeConfigStruct struct { - KnativeVersion string - IstioVersion string - IstioDownloadUrlTemplate string - IstioOperatorConfigUrl string - MetalLBVersion string - MetalLBConfigURLArray []string - LocalRegistryRepoVolumeSize string - LocalRegistryVolumeConfigUrl string - LocalRegistryDockerRegistryConfigUrl string - LocalRegistryHostUpdateConfigUrl string - MagicDNSConfigUrl string - VHiveMode bool -} - -var Knative = KnativeConfigStruct{ - IstioOperatorConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/istio/istio-minimal-operator.yaml", - IstioDownloadUrlTemplate: "https://github.com/istio/istio/releases/download/%s/istio-%s-linux-%s.tar.gz", - MetalLBConfigURLArray: []string{ - "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/metallb/metallb-ipaddresspool.yaml", - 
"https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/metallb/metallb-l2advertisement.yaml"}, - LocalRegistryVolumeConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/repository-volume.yaml", - LocalRegistryDockerRegistryConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/docker-registry.yaml", - LocalRegistryHostUpdateConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/repository-update-hosts.yaml", //TODO: uses path - MagicDNSConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/knative_yamls/serving-default-domain.yaml", //TODO: uses path - VHiveMode: true, -} - -func (knative *KnativeConfigStruct) GetIstioDownloadUrl() string { - return fmt.Sprintf(knative.IstioDownloadUrlTemplate, knative.IstioVersion, knative.IstioVersion, System.CurrentArch) -} diff --git a/scripts/openyurt-deployer/configs/kube.go b/scripts/openyurt-deployer/configs/kube.go deleted file mode 100644 index b88f21c46..000000000 --- a/scripts/openyurt-deployer/configs/kube.go +++ /dev/null @@ -1,14 +0,0 @@ -package configs - -type KubeConfigStruct struct { - K8sVersion string - AlternativeImageRepo string - ApiserverAdvertiseAddress string - PodNetworkCidr string - PodNetworkAddonConfigURL string - ApiserverPort string - ApiserverToken string - ApiserverTokenHash string -} - -var Kube = KubeConfigStruct{} diff --git a/scripts/openyurt-deployer/template/kubeTemplate.go b/scripts/openyurt-deployer/configs/kubeTemplate.yaml similarity index 51% rename from scripts/openyurt-deployer/template/kubeTemplate.go rename to scripts/openyurt-deployer/configs/kubeTemplate.yaml index ab6530b96..81f572fff 100644 --- a/scripts/openyurt-deployer/template/kubeTemplate.go +++ b/scripts/openyurt-deployer/configs/kubeTemplate.yaml @@ -1,7 +1,4 @@ -package template - -const ( - kubeletTemplate = `apiVersion: v1 +apiVersion: v1 clusters: - cluster: server: 
http://127.0.0.1:10261 @@ -14,13 +11,4 @@ contexts: name: default-context current-context: default-context kind: Config -preferences: {}` -) - -func GetKubeletConfig() string { - return kubeletTemplate -} - -func GetNetworkAddonConfigURL() string { - return vHiveConfigsURL + "/calico/canal.yaml" -} +preferences: {} \ No newline at end of file diff --git a/scripts/openyurt-deployer/configs/system.go b/scripts/openyurt-deployer/configs/system.go deleted file mode 100644 index b2d1a4de7..000000000 --- a/scripts/openyurt-deployer/configs/system.go +++ /dev/null @@ -1,74 +0,0 @@ -package configs - -import ( - "fmt" - "runtime" -) - -// System environment struct -type SystemEnvironmentStruct struct { - GoInstalled bool - ContainerdInstalled bool - RuncInstalled bool - CniPluginsInstalled bool - SystemdStartUp bool - NodeHostName string - GoVersion string - GoDownloadUrlTemplate string - ContainerdVersion string - ContainerdDownloadUrlTemplate string - ContainerdSystemdProfileDownloadUrl string - RuncVersion string - RuncDownloadUrlTemplate string - RunscVersion string - RunscDownloadUrlTemplate string - CniPluginsVersion string - CniPluginsDownloadUrlTemplate string - KubeVersion string - Dependencies string - TmpDir string - CurrentOS string - CurrentArch string - CurrentDir string - UserHomeDir string - PmuToolsRepoUrl string - ProtocVersion string - ProtocDownloadUrlTemplate string -} - -// Current system environment -var System = SystemEnvironmentStruct{ - GoInstalled: false, - ContainerdInstalled: false, - RuncInstalled: false, - CniPluginsInstalled: false, - SystemdStartUp: true, - CurrentOS: runtime.GOOS, - CurrentArch: runtime.GOARCH, - CurrentDir: "", - UserHomeDir: "", - NodeHostName: "", -} - -func (system *SystemEnvironmentStruct) GetProtocDownloadUrl() string { - return fmt.Sprintf(system.ProtocDownloadUrlTemplate, system.ProtocVersion, system.ProtocVersion) -} - -func (system *SystemEnvironmentStruct) GetContainerdDownloadUrl() string { - return 
fmt.Sprintf(system.ContainerdDownloadUrlTemplate, system.ContainerdVersion, system.ContainerdVersion, system.CurrentArch) -} - -func (system *SystemEnvironmentStruct) GetRuncDownloadUrl() string { - return fmt.Sprintf(system.RuncDownloadUrlTemplate, system.RuncVersion, system.CurrentArch) -} - -func (system *SystemEnvironmentStruct) GetRunscDownloadUrl() string { - unameArch := system.CurrentArch - switch unameArch { - case "amd64": - unameArch = "x86_64" - default: - } - - return fmt.Sprintf(system.RunscDownloadUrlTemplate, system.RunscVersion, unameArch) -} diff --git a/scripts/openyurt-deployer/configs/vhive.go b/scripts/openyurt-deployer/configs/vhive.go deleted file mode 100644 index e3ec12ebd..000000000 --- a/scripts/openyurt-deployer/configs/vhive.go +++ /dev/null @@ -1,19 +0,0 @@ -package configs - -type VHiveConfigStruct struct { - FirecrackerKernelImgDownloadUrl string - StargzVersion string - VHiveRepoPath string - VHiveRepoBranch string - VHiveRepoUrl string - VHiveSetupConfigPath string - ForceRemote bool -} - -var VHive = VHiveConfigStruct{ - VHiveRepoPath: ".", - VHiveRepoBranch: "main", - VHiveRepoUrl: "https://github.com/vhive-serverless/vHive.git", - VHiveSetupConfigPath: "../../configs/setup", - ForceRemote: false, -} diff --git a/scripts/openyurt-deployer/configs/yurt.go b/scripts/openyurt-deployer/configs/yurt.go deleted file mode 100644 index 10cc29f2a..000000000 --- a/scripts/openyurt-deployer/configs/yurt.go +++ /dev/null @@ -1,25 +0,0 @@ -package configs - -type YurtEnvironment struct { - HelmInstalled bool - HelmPublicSigningKeyDownloadUrl string - KustomizeInstalled bool - KustomizeScriptDownloadUrl string - MasterAsCloud bool - WorkerNodeName string - WorkerAsEdge bool - Dependencies string - YurtVersion string -} - -var Yurt = YurtEnvironment{ - HelmInstalled: false, - HelmPublicSigningKeyDownloadUrl: "https://baltocdn.com/helm/signing.asc", - KustomizeInstalled: false, - KustomizeScriptDownloadUrl: 
"https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh", - MasterAsCloud: true, - WorkerNodeName: "", - WorkerAsEdge: true, - Dependencies: "", - YurtVersion: "1.2.1", -} diff --git a/scripts/openyurt-deployer/configs/yurtAppSetTemplate.yaml b/scripts/openyurt-deployer/configs/yurtAppSetTemplate.yaml new file mode 100644 index 000000000..d1b311aa6 --- /dev/null +++ b/scripts/openyurt-deployer/configs/yurtAppSetTemplate.yaml @@ -0,0 +1,59 @@ +apiVersion: apps.openyurt.io/v1alpha1 +kind: YurtAppSet +metadata: + labels: + controller-tools.k8s.io: "1.0" + name: openyurt-aes +spec: + selector: + matchLabels: + app: openyurt-aes + workloadTemplate: + deploymentTemplate: + metadata: + labels: + app: openyurt-aes + spec: + template: + metadata: + labels: + app: openyurt-aes + spec: + containers: + - name: relay + image: docker.io/vhiveease/relay:latest + ports: + - name: h2c + containerPort: 50000 + args: + - --addr=0.0.0.0:50000 + - --function-endpoint-url=0.0.0.0 + - --function-endpoint-port=50051 + - --function-name=aes-python + - name: aes-python + image: docker.io/vhiveease/aes-python:latest + args: + - --addr=0.0.0.0 + - --port=50051 + topology: + pools: + - name: %s + nodeSelectorTerm: + matchExpressions: + - key: apps.openyurt.io/nodepool + operator: In + values: + - %s + replicas: 1 + - name: %s + nodeSelectorTerm: + matchExpressions: + - key: apps.openyurt.io/nodepool + operator: In + values: + - %s + replicas: 1 + tolerations: + - effect: NoSchedule + key: apps.openyurt.io/example + operator: Exists \ No newline at end of file diff --git a/scripts/openyurt-deployer/configs/yurtTemplate.yaml b/scripts/openyurt-deployer/configs/yurtTemplate.yaml new file mode 100644 index 000000000..1717497a0 --- /dev/null +++ b/scripts/openyurt-deployer/configs/yurtTemplate.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + k8s-app: yurt-hub + name: yurt-hub + namespace: kube-system +spec: + volumes: + - name: hub-dir + 
hostPath: + path: /var/lib/yurthub + type: DirectoryOrCreate + - name: kubernetes + hostPath: + path: /etc/kubernetes + type: Directory + - name: pem-dir + hostPath: + path: /var/lib/kubelet/pki + type: Directory + containers: + - name: yurt-hub + image: openyurt/yurthub:latest + imagePullPolicy: IfNotPresent + volumeMounts: + - name: hub-dir + mountPath: /var/lib/yurthub + - name: kubernetes + mountPath: /etc/kubernetes + - name: pem-dir + mountPath: /var/lib/kubelet/pki + command: + - yurthub + - --v=2 + - --server-addr=https://__kubernetes_master_address__ + - --node-name=$(NODE_NAME) + - --join-token=__bootstrap_token__ + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /v1/healthz + port: 10267 + initialDelaySeconds: 300 + periodSeconds: 5 + failureThreshold: 3 + resources: + requests: + cpu: 150m + memory: 150Mi + limits: + memory: 300Mi + securityContext: + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + hostNetwork: true + priorityClassName: system-node-critical + priority: 2000001000 \ No newline at end of file diff --git a/scripts/openyurt-deployer/configurations.go b/scripts/openyurt-deployer/configurations.go new file mode 100644 index 000000000..71b318454 --- /dev/null +++ b/scripts/openyurt-deployer/configurations.go @@ -0,0 +1,291 @@ +// MIT License +// +// # Copyright (c) 2023 Jason Chua, Ruiqi Lai and vHive team +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or 
substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package main + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path" + "runtime" + + utils "github.com/vhive-serverless/vHive/scripts/utils" +) + +type DemoEnvironment struct { + CloudYamlFile string + EdgeYamlFile string + CloudBenchYamlFile string + EdgeBenchYamlFile string + YurtAppSetYamlFile string + CloudPoolName string + EdgePoolName string +} + +type KnativeConfigStruct struct { + KnativeVersion string + IstioVersion string + IstioDownloadUrlTemplate string + IstioOperatorConfigUrl string + MetalLBVersion string + MetalLBConfigURLArray []string + LocalRegistryRepoVolumeSize string + LocalRegistryVolumeConfigUrl string + LocalRegistryDockerRegistryConfigUrl string + LocalRegistryHostUpdateConfigUrl string + MagicDNSConfigUrl string + VHiveMode bool +} + +type KubeConfigStruct struct { + K8sVersion string + AlternativeImageRepo string + ApiserverAdvertiseAddress string + PodNetworkCidr string + PodNetworkAddonConfigURL string + ApiserverPort string + ApiserverToken string + ApiserverTokenHash string +} + +type SystemEnvironmentStruct struct { + GoInstalled bool + ContainerdInstalled bool + RuncInstalled bool + CniPluginsInstalled bool + SystemdStartUp bool + NodeHostName string + GoVersion string + GoDownloadUrlTemplate string + ContainerdVersion string + ContainerdDownloadUrlTemplate string + ContainerdSystemdProfileDownloadUrl string + RuncVersion string + RuncDownloadUrlTemplate string + RunscVersion string + 
RunscDownloadUrlTemplate string + CniPluginsVersion string + CniPluginsDownloadUrlTemplate string + KubeVersion string + KubeRepoUrl string + Dependencies string + TmpDir string + CurrentOS string + CurrentArch string + CurrentDir string + UserHomeDir string + PmuToolsRepoUrl string + ProtocVersion string + ProtocDownloadUrlTemplate string +} + +type YurtEnvironment struct { + HelmInstalled bool + HelmPublicSigningKeyDownloadUrl string + KustomizeInstalled bool + KustomizeScriptDownloadUrl string + MasterAsCloud bool + WorkerNodeName string + WorkerAsEdge bool + Dependencies string + YurtVersion string +} + +type VHiveConfigStruct struct { + FirecrackerKernelImgDownloadUrl string + StargzVersion string + VHiveRepoPath string + VHiveRepoBranch string + VHiveRepoUrl string + VHiveSetupConfigPath string + ForceRemote bool +} + +// Variables for all configs structure +var Demo = DemoEnvironment{ + CloudYamlFile: "cloud.yaml", + EdgeYamlFile: "edge.yaml", + CloudBenchYamlFile: "cloud-bench.yaml", + EdgeBenchYamlFile: "edge-bench.yaml", + YurtAppSetYamlFile: "yurt.yaml", + CloudPoolName: "cloud", + EdgePoolName: "edge", +} + +var Knative = KnativeConfigStruct{ + IstioOperatorConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/istio/istio-minimal-operator.yaml", + IstioDownloadUrlTemplate: "https://github.com/istio/istio/releases/download/%s/istio-%s-linux-%s.tar.gz", + MetalLBConfigURLArray: []string{ + "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/metallb/metallb-ipaddresspool.yaml", + "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/metallb/metallb-l2advertisement.yaml"}, + LocalRegistryVolumeConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/repository-volume.yaml", + LocalRegistryDockerRegistryConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/docker-registry.yaml", + LocalRegistryHostUpdateConfigUrl: 
"https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/registry/repository-update-hosts.yaml", //TODO: uses path + MagicDNSConfigUrl: "https://raw.githubusercontent.com/vhive-serverless/vHive/main/configs/knative_yamls/serving-default-domain.yaml", //TODO: uses path + VHiveMode: true, +} + +var Kube = KubeConfigStruct{} + +var System = SystemEnvironmentStruct{ + GoInstalled: false, + ContainerdInstalled: false, + RuncInstalled: false, + CniPluginsInstalled: false, + SystemdStartUp: true, + CurrentOS: runtime.GOOS, + CurrentArch: runtime.GOARCH, + CurrentDir: "", + UserHomeDir: "", + NodeHostName: "", +} + +var VHive = VHiveConfigStruct{ + VHiveRepoPath: ".", + VHiveRepoBranch: "main", + VHiveRepoUrl: "https://github.com/vhive-serverless/vHive.git", + VHiveSetupConfigPath: "../../configs/setup", + ForceRemote: false, +} + +var Yurt = YurtEnvironment{ + HelmInstalled: false, + HelmPublicSigningKeyDownloadUrl: "https://baltocdn.com/helm/signing.asc", + KustomizeInstalled: false, + KustomizeScriptDownloadUrl: "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh", + MasterAsCloud: true, + WorkerNodeName: "", + WorkerAsEdge: true, + Dependencies: "", + YurtVersion: "1.2.1", +} + +// Helper functions to get URL +func (knative *KnativeConfigStruct) GetIstioDownloadUrl() string { + return fmt.Sprintf(knative.IstioDownloadUrlTemplate, knative.IstioVersion, knative.IstioVersion, System.CurrentArch) +} + +func (system *SystemEnvironmentStruct) GetProtocDownloadUrl() string { + return fmt.Sprintf(system.ProtocDownloadUrlTemplate, system.ProtocVersion, system.ProtocVersion) +} + +func (system *SystemEnvironmentStruct) GetContainerdDownloadUrl() string { + return fmt.Sprintf(system.ContainerdDownloadUrlTemplate, system.ContainerdVersion, system.ContainerdVersion, system.CurrentArch) +} + +func (system *SystemEnvironmentStruct) GetRuncDownloadUrl() string { + return fmt.Sprintf(system.RuncDownloadUrlTemplate, 
system.RuncVersion, system.CurrentArch) +} + +func (system *SystemEnvironmentStruct) GetRunscDownloadUrl() string { + unameArch := system.CurrentArch + switch unameArch { + case "amd64": + unameArch = "x86_64" + default: + } + + return fmt.Sprintf(system.RunscDownloadUrlTemplate, system.RunscVersion, unameArch) +} + +// Decode specific config files (JSON format) +func DecodeConfig(configFilePath string, configStruct interface{}) error { + // Open & read the config file + configFile, err := os.Open(configFilePath) + if err != nil { + return err + } + defer configFile.Close() + + // Read file content + configContent, err := io.ReadAll(configFile) + if err != nil { + return err + } + + // Decode json into struct + err = json.Unmarshal(configContent, configStruct) + + return err + +} + +// Load knative config files +func (knative *KnativeConfigStruct) LoadConfig() error { + var err error + // Check config directory + if len(VHive.VHiveSetupConfigPath) == 0 { + VHive.VHiveSetupConfigPath, err = utils.GetVHiveFilePath("configs/setup") + if err != nil { + utils.CleanEnvironment() + os.Exit(1) + } + } + // Get the (absolute) path of the config file + configFilePath := path.Join(VHive.VHiveSetupConfigPath, "knative.json") + + // Decode json into struct + err = DecodeConfig(configFilePath, knative) + + return err + +} + +// Load kubernetes config files +func (kube *KubeConfigStruct) LoadConfig() error { + // Get the (absolute) path of the config file + configFilePath := path.Join(VHive.VHiveSetupConfigPath, "kube.json") + + // Decode json into struct + err := DecodeConfig(configFilePath, kube) + + return err +} + +// Load system config files +func (system *SystemEnvironmentStruct) LoadConfig() error { + // Get the (absolute) path of the config file + configFilePath := path.Join(VHive.VHiveSetupConfigPath, "system.json") + + // Decode json into struct + err := DecodeConfig(configFilePath, system) + + return err +} + +// Load vHive config files +func (vhive *VHiveConfigStruct) 
LoadConfig() error { + // Get the (absolute) path of the config file + configFilePath := path.Join(VHive.VHiveSetupConfigPath, "vhive.json") + + // Decode json into struct + err := DecodeConfig(configFilePath, vhive) + + return err + +} + +const ( + Version = "0.2.4b" // Version Info +) diff --git a/scripts/openyurt-deployer/go.mod b/scripts/openyurt-deployer/go.mod index 88687a0ae..80f074631 100644 --- a/scripts/openyurt-deployer/go.mod +++ b/scripts/openyurt-deployer/go.mod @@ -1,15 +1,18 @@ module github.com/vhive-serverless/vhive/scripts/openyurt_deployer -go 1.20 +go 1.19 + +require ( + github.com/sfreiberg/simplessh v0.0.0-20220719182921-185eafd40485 + github.com/sirupsen/logrus v1.9.3 + github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231114022852-400a49d284cb +) require ( github.com/davidmz/go-pageant v1.0.2 // indirect github.com/kr/fs v0.1.0 // indirect github.com/pkg/sftp v1.13.4 // indirect - github.com/sfreiberg/simplessh v0.0.0-20220719182921-185eafd40485 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231018022901-6a0c478d2c9f // indirect - github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231018022901-6a0c478d2c9f // indirect + github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231114022852-400a49d284cb // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect ) diff --git a/scripts/openyurt-deployer/go.sum b/scripts/openyurt-deployer/go.sum old mode 100644 new mode 100755 index 9f7752abe..75e44f4ed --- a/scripts/openyurt-deployer/go.sum +++ b/scripts/openyurt-deployer/go.sum @@ -1,4 +1,5 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0= github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE= @@ -6,17 +7,19 @@ github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/pkg/sftp v1.13.4 h1:Lb0RYJCmgUcBgZosfoi9Y9sbl6+LJgOIgk/2Y4YjMFg= github.com/pkg/sftp v1.13.4/go.mod h1:LzqnAvaD5TWeNBsZpfKxSYn1MbjWwOsCIAFFJbpIsK8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sfreiberg/simplessh v0.0.0-20220719182921-185eafd40485 h1:ZMBZ2DKX1sScUSo9ZUwGI7jCMukslPNQNfZaw9vVyfY= github.com/sfreiberg/simplessh v0.0.0-20220719182921-185eafd40485/go.mod h1:9qeq2P58+4+LyuncL3waJDG+giOfXgowfrRZZF9XdWk= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231018022901-6a0c478d2c9f h1:2Cyd5RJaZ0Pdyk7Az338/PKeofj7RLZjD5rSAO+wqvk= -github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231018022901-6a0c478d2c9f/go.mod h1:nJSon4Eng7PdZ4HJX9dnZ7H4qxVm/r5zseFPfom7Jto= -github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231018022901-6a0c478d2c9f h1:3KArl/h4PpkhAhlpPAMarh9hrj6AWYfnhkoXZCS2AWw= -github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231018022901-6a0c478d2c9f/go.mod h1:xyjKlPn6JqSQtzKOCu8L4DW4rQcNmhxK9f97cOPo0Sg= +github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231114022852-400a49d284cb 
h1:JoudbjIOvkLiDY7WjaQosP0IKipHnlxFOCIeICQrtRQ= +github.com/vhive-serverless/vHive/scripts/configs v0.0.0-20231114022852-400a49d284cb/go.mod h1:nJSon4Eng7PdZ4HJX9dnZ7H4qxVm/r5zseFPfom7Jto= +github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231114022852-400a49d284cb h1:SmFLKJc4wAhr61ige8hoLiLWDKAI2uewBQUNejI63Hs= +github.com/vhive-serverless/vHive/scripts/utils v0.0.0-20231114022852-400a49d284cb/go.mod h1:xyjKlPn6JqSQtzKOCu8L4DW4rQcNmhxK9f97cOPo0Sg= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= @@ -24,12 +27,12 @@ golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/scripts/openyurt-deployer/go.work b/scripts/openyurt-deployer/go.work index 8c0d69e84..63b4c58ec 100644 --- a/scripts/openyurt-deployer/go.work +++ b/scripts/openyurt-deployer/go.work @@ -2,7 +2,4 @@ go 1.19 use( . -./configs -./node -./template ) \ No newline at end of file diff --git a/scripts/openyurt-deployer/main.go b/scripts/openyurt-deployer/main.go index 86e414443..5cb0180e9 100644 --- a/scripts/openyurt-deployer/main.go +++ b/scripts/openyurt-deployer/main.go @@ -1,15 +1,36 @@ +// MIT License +// +// Copyright (c) 2023 Jason Chua, Ruiqi Lai and vHive team +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package main import ( "encoding/json" "flag" "os" + "strings" "github.com/vhive-serverless/vHive/scripts/utils" log "github.com/sirupsen/logrus" - "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/configs" - "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/node" ) type NodesInfo struct { @@ -58,22 +79,22 @@ func main() { } operation := os.Args[1] switch operation { - case "deploy": + case "k8s": deployNodes(*deployerConf) - // case "clean": - // cleanNodes(*deployerConf) - // case "demo-e": - // demo(*deployerConf, false) - // case "demo-c": - // demo(*deployerConf, true) - // case "demo-clear": - // delDemo(*deployerConf) - // case "demo-print": - // printDemo(*deployerConf) - // case "deploy-yurt": - // deployOpenYurt(*deployerConf) + case "knative": + deployKnative(*deployerConf) + case "demo-deploy-on-edge": + demo(*deployerConf, false) + case "demo-deploy-on-cloud": + demo(*deployerConf, true) + case "demo-print": + printDemo(*deployerConf) + case "openyurt": + deployOpenYurt(*deployerConf) + case "help": + utils.InfoPrintf("Usage: %s [Parameters...]\n", os.Args[0]) default: - utils.InfoPrintf("Usage: %s [Parameters...]\n", os.Args[0]) + utils.InfoPrintf("Usage: %s [Parameters...]\n", os.Args[0]) os.Exit(-1) } @@ -95,40 +116,40 @@ func readAndUnMarshall(deployerConfFile string) (NodesInfo, error) { return nodesInfo, nil } -func parseNodeInfo(nodesInfo NodesInfo) []node.Node { +func parseNodeInfo(nodesInfo NodesInfo) []Node { masterName := nodesInfo.Master cloudNames := nodesInfo.Workers.Cloud edgeNames := nodesInfo.Workers.Edge - nodeList := []node.Node{} + nodeList := []Node{} // Load configs from configs/setup.json - configs.System.LoadConfig() - 
configs.Knative.LoadConfig() - configs.Kube.LoadConfig() - - masterNode := node.Node{Name: masterName, Client: SetupSSHConn(masterName), NodeRole: "master", Configs: &node.NodeConfig{ - System: configs.System, - Kube: configs.Kube, - Knative: configs.Knative, - Yurt: configs.Yurt, - Demo: configs.Demo}} + System.LoadConfig() + Knative.LoadConfig() + Kube.LoadConfig() + + masterNode := Node{Name: masterName, Client: SetupSSHConn(masterName), NodeRole: "master", Configs: &NodeConfig{ + System: System, + Kube: Kube, + Knative: Knative, + Yurt: Yurt, + Demo: Demo}} nodeList = append(nodeList, masterNode) for _, name := range cloudNames { - nodeList = append(nodeList, node.Node{Name: name, Client: SetupSSHConn(name), NodeRole: "cloud", Configs: &node.NodeConfig{ - System: configs.System, - Kube: configs.Kube, - Knative: configs.Knative, - Yurt: configs.Yurt, - Demo: configs.Demo}}) + nodeList = append(nodeList, Node{Name: name, Client: SetupSSHConn(name), NodeRole: "cloud", Configs: &NodeConfig{ + System: System, + Kube: Kube, + Knative: Knative, + Yurt: Yurt, + Demo: Demo}}) } for _, name := range edgeNames { - nodeList = append(nodeList, node.Node{Name: name, Client: SetupSSHConn(name), NodeRole: "edge", Configs: &node.NodeConfig{ - System: configs.System, - Kube: configs.Kube, - Knative: configs.Knative, - Yurt: configs.Yurt, - Demo: configs.Demo}}) + nodeList = append(nodeList, Node{Name: name, Client: SetupSSHConn(name), NodeRole: "edge", Configs: &NodeConfig{ + System: System, + Kube: Kube, + Knative: Knative, + Yurt: Yurt, + Demo: Demo}}) } for _, node := range nodeList { @@ -139,7 +160,7 @@ func parseNodeInfo(nodesInfo NodesInfo) []node.Node { return nodeList } -func initializeNodes(nodesInfo NodesInfo) []node.Node { +func initializeNodes(nodesInfo NodesInfo) []Node { nodeList := parseNodeInfo(nodesInfo) // init system, all nodes are the same @@ -173,15 +194,96 @@ func deployNodes(deployerConfFile string) { utils.InfoPrintf(name) } utils.InfoPrintf("]\n") +} + 
+func deployKnative(deployerConfFile string) { + + nodesInfo, err := readAndUnMarshall(deployerConfFile) + utils.CheckErrorWithMsg(err, "Failed to read and unmarshal deployer configuration JSON") + nodeList := parseNodeInfo(nodesInfo) + masterNode := nodeList[0] // init knative utils.SuccessPrintf("Start to init knative\n") masterNode.InstallKnativeServing() masterNode.InstallKnativeEventing() utils.SuccessPrintf("Knative has been installed!\n") +} + +func deployOpenYurt(deployerConfFile string) { - // init demo environment - masterNode.BuildDemo(workerNodes) + nodesInfo, err := readAndUnMarshall(deployerConfFile) + utils.CheckErrorWithMsg(err, "Failed to read and unmarshal deployer configuration JSON") + nodeList := initializeNodes(nodesInfo) + masterNode := nodeList[0] + workerNodes := nodeList[1:] + + // init yurt cluster + utils.SuccessPrintf("Start to init yurt cluster!\n") + masterNode.YurtMasterInit() + + utils.WaitPrintf("Extracting master node information from logs") + output, err := masterNode.ExecShellCmd("sed -n '1p;2p;3p;4p' %s/masterNodeValues", masterNode.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to extract master node information from logs!\n") + + // Process the content and assign it to variables + lines := strings.Split(strings.TrimSpace(output), "\n") + if len(lines) != 4 { + utils.ErrorPrintf("Invalid file format") + return + } + + addr := lines[0] + port := lines[1] + token := lines[2] + + for _, worker := range workerNodes { + worker.YurtWorkerJoin(addr, port, token) + utils.InfoPrintf("worker %s joined yurt cluster!\n", worker.Configs.System.NodeHostName) + } + utils.SuccessPrintf("All nodes joined yurt cluster, start to expand\n") + for _, worker := range workerNodes { + masterNode.YurtMasterExpand(&worker) + utils.InfoPrintf("Master has expanded to worker:%s\n", worker.Configs.System.NodeHostName) + } + utils.SuccessPrintf("Master has expaned to all nodes!\n") + for _, node := range nodeList { + utils.InfoPrintf("node: 
%s\n", node.Name) + node.CleanUpTmpDir() + } utils.SuccessPrintf(">>>>>>>>>>>>>>>>OpenYurt Cluster Deployment Finished!<<<<<<<<<<<<<<<\n") } + +func demo(deployerConfFile string, isCloud bool) { + demoEnv := "Cloud" + if !isCloud { + demoEnv = "Edge" + } + utils.SuccessPrintf(">>>>>>>>>>>>>>>>Entering openyurt demo for [%s Node Pool]<<<<<<<<<<<<<<<\n", demoEnv) + + nodesInfo, err := readAndUnMarshall(deployerConfFile) + utils.CheckErrorWithMsg(err, "Failed to read and unmarshal deployer configuration JSON") + nodeList := initializeNodes(nodesInfo) + masterNode := nodeList[0] + workerNodes := nodeList[1:] + + // run demo, should only be executed after deployment + utils.SuccessPrintf("Start to init demo\n") + masterNode.Demo(isCloud) + utils.SuccessPrintf("Demo finished!\n") + masterNode.PrintDemoInfo(workerNodes, isCloud) +} + +func printDemo(deployerConfFile string) { + + nodesInfo, err := readAndUnMarshall(deployerConfFile) + utils.CheckErrorWithMsg(err, "Failed to read and unmarshal deployer configuration JSON") + + nodeList := initializeNodes(nodesInfo) + masterNode := nodeList[0] + workerNodes := nodeList[1:] + masterNode.GetNodeHostName() + masterNode.PrintDemoInfo(workerNodes, true) + masterNode.PrintDemoInfo(workerNodes, false) +} diff --git a/scripts/openyurt-deployer/main_test.go b/scripts/openyurt-deployer/main_test.go new file mode 100644 index 000000000..80773c706 --- /dev/null +++ b/scripts/openyurt-deployer/main_test.go @@ -0,0 +1,160 @@ +// MIT License +// +// Copyright (c) 2023 Jason Chua, Ruiqi Lai and vHive team +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the 
following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package main + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestUnmarshallingJsonFile(t *testing.T) { + // Create a temporary file with JSON content + tempFile, err := ioutil.TempFile("", "test.json") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tempFile.Name()) + + // Define your sample JSON content + jsonContent := `{ + "master": "username@masterip", + "workers": { + "cloud": [ + "username@cloudip" + ], + "edge": [ + "username@edgeip" + ] + } + }` + + // Write the JSON content to the temporary file + err = ioutil.WriteFile(tempFile.Name(), []byte(jsonContent), 0644) + if err != nil { + t.Fatal(err) + } + + // Test the readAndUnMarshall function with the temporary file + result, err := readAndUnMarshall(tempFile.Name()) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + t.Logf("Unmarshall: %s", result) + // Define the expected result based on your JSON structure + expected := NodesInfo{ + // Initialize the fields based on your JSON structure + Master: "username@masterip", + Workers: Workers{ + Cloud: []string{"username@cloudip"}, + Edge: []string{"username@edgeip"}, + }, + } + + t.Logf("expected res: %s", expected) + // Compare the result with the expected value + if !jsonEqual(result, expected) { + t.Errorf("Expected %v, got %v", expected, result) + 
} +} + +// jsonEqual checks if two JSON objects are equal. +func jsonEqual(a, b interface{}) bool { + ajson, err := json.Marshal(a) + if err != nil { + return false + } + + bjson, err := json.Marshal(b) + if err != nil { + return false + } + + return string(ajson) == string(bjson) +} + +func TestParsingNodeDependencyVersion(t *testing.T) { + + // Capture standard output + r, w, err := os.Pipe() + if err != nil { + t.Fatalf("Error creating pipe: %v", err) + } + defer r.Close() + defer w.Close() + + // Redirect standard output to the pipe + oldStdout := os.Stdout + os.Stdout = w + defer func() { + os.Stdout = oldStdout + }() + + // Call the function to be tested + nodeList := parseNodeInfo(mockNodesInfo) + t.Logf("nodeList: %v", nodeList) + + // Capture stdout + w.Close() + var capturedOutput strings.Builder + _, err = io.Copy(&capturedOutput, r) + if err != nil { + t.Fatalf("Error reading from pipe: %v", err) + } + t.Logf("captop: %v", capturedOutput.String()) + + // Line split stdout + lines := strings.Split(capturedOutput.String(), "\n") + for _, line := range lines { + // Example: Check for keywords and versions + if strings.Contains(line, " Golang(version") { + t.Logf("line: %s", line) + if !strings.Contains(line, criteriaTable["Golang"]) { + t.Logf("failing: %s", line) + t.Errorf("Expected version %s not found in output for keyword %s", criteriaTable["Golang"], "Golang") + } + } else if strings.Contains(line, "containerd(version") { + t.Logf("line: %s", line) + if !strings.Contains(line, criteriaTable["containerd"]) { + t.Logf("failing: %s", line) + t.Errorf("Expected version %s not found in output for keyword %s", criteriaTable["containerd"], "containerd") + } + } else if strings.Contains(line, "runc(version") { + t.Logf("line: %s", line) + if !strings.Contains(line, criteriaTable["runc"]) { + t.Logf("failing: %s", line) + t.Errorf("Expected version %s not found in output for keyword %s", criteriaTable["runc"], "runc") + } + } else if strings.Contains(line, 
"CNI plugins(version") { + t.Logf("line: %s", line) + if !strings.Contains(line, criteriaTable["CNI"]) { + t.Logf("failing: %s", line) + t.Errorf("Expected version %s not found in output for keyword %s", criteriaTable["CNI"], "CNI") + } + } + } +} diff --git a/scripts/openyurt-deployer/node.go b/scripts/openyurt-deployer/node.go new file mode 100644 index 000000000..ffaff9978 --- /dev/null +++ b/scripts/openyurt-deployer/node.go @@ -0,0 +1,1089 @@ +// MIT License +// +// Copyright (c) 2023 Jason Chua, Ruiqi Lai and vHive team +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package main + +import ( + "fmt" + "path" + "strings" + "time" + + "github.com/sfreiberg/simplessh" + "github.com/vhive-serverless/vHive/scripts/utils" +) + +type NodeConfig struct { + System SystemEnvironmentStruct + Kube KubeConfigStruct + Knative KnativeConfigStruct + Yurt YurtEnvironment + Demo DemoEnvironment +} + +type Node struct { + Name string + Client *simplessh.Client + NodeRole string + Configs *NodeConfig +} + +func (node *Node) ExecShellCmd(cmd string, pars ...any) (string, error) { + shellCmd := fmt.Sprintf(cmd, pars...) + out, err := node.Client.Exec(shellCmd) + if err != nil { + utils.WarnPrintf("node: [%s] failed to exec: \n%s\nerror:%s\n", node.Name, shellCmd, out) + } + return strings.TrimSuffix(string(out), "\n"), err +} + +func (node *Node) OnlyExecByMaster() { + if node.NodeRole != "master" { + utils.FatalPrintf("This function can only be executed by master node!\n") + } +} + +func (node *Node) OnlyExecByWorker() { + if node.NodeRole == "master" { + utils.FatalPrintf("This function can only be executed by worker node!\n") + } +} + +func (node *Node) SetMasterAsCloud(asCloud bool) { + node.OnlyExecByMaster() + node.Configs.Yurt.MasterAsCloud = asCloud +} + +// System related functions + +// Detect current architecture +func (node *Node) DetectArch() { + utils.WaitPrintf("Detetcting current arch") + out, err := node.ExecShellCmd("dpkg --print-architecture") + utils.CheckErrorWithMsg(err, "Failed to get current arch!\n") + node.Configs.System.CurrentArch = out + switch node.Configs.System.CurrentArch { + default: + utils.InfoPrintf("Detected Arch: %s for node: %s\n", node.Configs.System.CurrentArch, node.Name) + } +} + +// Detect current operating system +func (node *Node) DetectOS() { + switch node.Configs.System.CurrentOS { + case "windows": + utils.FatalPrintf("Unsupported OS: %s\n", node.Configs.System.CurrentOS) + default: + var err error + node.Configs.System.CurrentOS, err = node.ExecShellCmd("sed -n 's/^NAME=\"\\(.*\\)\"/\\1/p' < 
/etc/os-release | head -1 | tr '[:upper:]' '[:lower:]'") + utils.InfoPrintf("Detected OS: %s\n", node.Configs.System.CurrentOS) + utils.CheckErrorWithMsg(err, "Failed to get Linux distribution info!\n") + switch node.Configs.System.CurrentOS { + case "ubuntu": + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + } + utils.InfoPrintf("Detected OS: %s for node: %s\n", + strings.TrimSuffix(string(node.Configs.System.CurrentOS), "\n"), + node.Name) + } +} + +// Get current directory +func (node *Node) GetCurrentDir() { + var err error + node.Configs.System.CurrentDir, err = node.ExecShellCmd("pwd") + utils.CheckErrorWithMsg(err, "Failed to get get current directory!\n") +} + +// Get current home directory +func (node *Node) GetUserHomeDir() { + var err error + node.Configs.System.UserHomeDir, err = node.ExecShellCmd("echo $HOME") + utils.CheckErrorWithMsg(err, "Failed to get current home directory!\n") +} + +// Get current node's hostname +func (node *Node) GetNodeHostName() { + var err error + node.Configs.System.NodeHostName, err = node.ExecShellCmd("echo $HOSTNAME") + utils.CheckErrorWithMsg(err, "Failed to get current node hostname!\n") +} + +// Create temporary directory +func (node *Node) CreateTmpDir() { + var err error + utils.InfoPrintf("Creating temporary directory") + tmpDir := "~/yurt_tmp" + _, err = node.ExecShellCmd("mkdir -p %s", tmpDir) + node.Configs.System.TmpDir = tmpDir + utils.CheckErrorWithMsg(err, "Failed to create temporary directory!\n") +} + +// Clean up temporary directory +func (node *Node) CleanUpTmpDir() { + utils.InfoPrintf("Cleaning up temporary directory") + _, err := node.ExecShellCmd("rm -rf %s/*", node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to create temporary directory!\n") +} + +// Extract arhive file to specific directory(currently support .tar.gz file only) +func (node *Node) ExtractToDir(filePath string, dirPath string, privileged bool) error { + var err 
error + if privileged { + _, err = node.ExecShellCmd("sudo tar -xzvf %s -C %s", filePath, dirPath) + } else { + _, err = node.ExecShellCmd("tar -xzvf %s -C %s", filePath, dirPath) + } + return err +} + +// Append directory to PATH variable for bash & zsh +func (node *Node) AppendDirToPath(pathTemplate string, pars ...any) error { + appendedPath := fmt.Sprintf(pathTemplate, pars...) + + // For bash + _, err := node.ExecShellCmd("echo 'export PATH=$PATH:%s' >> %s/.bashrc", appendedPath, node.Configs.System.UserHomeDir) + if err != nil { + return err + } + // For zsh + _, err = node.LookPath("zsh") + if err != nil { + _, err = node.ExecShellCmd("echo 'export PATH=$PATH:%s' >> %s/.zshrc", appendedPath, node.Configs.System.UserHomeDir) + } + return err +} + +// Turn off unattended-upgrades +func (node *Node) TurnOffAutomaticUpgrade() (string, error) { + switch node.Configs.System.CurrentOS { + case "ubuntu": + _, err := node.ExecShellCmd("stat /etc/apt/apt.conf.d/20auto-upgrades") + if err == nil { + return node.ExecShellCmd("sudo sed -i 's/\"1\"/\"0\"/g' /etc/apt/apt.conf.d/20auto-upgrades") + } + return "", nil + default: + return "", nil + } +} + +// Install packages on various OS +func (node *Node) InstallPackages(packagesTemplate string, pars ...any) error { + packages := fmt.Sprintf(packagesTemplate, pars...) 
+ switch node.Configs.System.CurrentOS { + case "ubuntu": + _, err := node.ExecShellCmd(`sudo apt-get -qq update && \ + sudo apt-get -qq install -y --allow-downgrades --allow-change-held-packages %s`, packages) + return err + case "centos": + _, err := node.ExecShellCmd("sudo dnf -y -q install %s", packages) + return err + case "rocky linux": + _, err := node.ExecShellCmd("sudo dnf -y -q install %s", packages) + return err + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + return &utils.ShellError{Msg: "Unsupported Linux distribution", ExitCode: 1} + } +} + +// Download file to temporary directory (absolute path of downloaded file will be the first return value if successful) +func (node *Node) DownloadToTmpDir(urlTemplate string, pars ...any) (string, error) { + url := fmt.Sprintf(urlTemplate, pars...) + fileName := path.Base(url) + filePath := node.Configs.System.TmpDir + "/" + fileName + _, err := node.ExecShellCmd("curl -sSL --output %s %s", filePath, url) + return filePath, err +} + +func (node *Node) LookPath(path string) (string, error) { + return node.ExecShellCmd("command -v %s", path) +} + +// Check system environment +func (node *Node) CheckSystemEnvironment() { + // Check system environment + utils.InfoPrintf("Checking system environment...\n") + var err error + + // Check Golang + _, err = node.LookPath("go") + if err != nil { + utils.InfoPrintf("Golang not found! Golang(version %s) will be automatically installed!\n", + node.Configs.System.GoVersion) + } else { + utils.InfoPrintf("Golang found!\n") + node.Configs.System.GoInstalled = true + } + + // Check Containerd + _, err = node.LookPath("containerd") + if err != nil { + utils.InfoPrintf("Containerd not found! 
containerd(version %s) will be automatically installed!\n", + node.Configs.System.ContainerdVersion) + } else { + utils.InfoPrintf("Containerd found!\n") + node.Configs.System.ContainerdInstalled = true + } + + // Check runc + _, err = node.LookPath("runc") + if err != nil { + utils.InfoPrintf("runc not found! runc(version %s) will be automatically installed!\n", + node.Configs.System.RuncVersion) + } else { + utils.InfoPrintf("runc found!\n") + node.Configs.System.RuncInstalled = true + } + + // Check CNI plugins + _, err = node.ExecShellCmd("stat /opt/cni/bin") + if err != nil { + utils.InfoPrintf("CNI plugins not found! CNI plugins(version %s) will be automatically installed!\n", + node.Configs.System.CniPluginsVersion) + } else { + utils.InfoPrintf("CNI plugins found!\n") + node.Configs.System.CniPluginsInstalled = true + } + + // Add OS-specific dependencies to installation lists + switch node.Configs.System.CurrentOS { + case "ubuntu": + node.Configs.System.Dependencies = "git wget curl build-essential apt-transport-https ca-certificates" + case "rocky linux": + node.Configs.System.Dependencies = "" + case "centos": + node.Configs.System.Dependencies = "" + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + } + + utils.InfoPrintf("Finish checking system environment!\n") +} + +func (node *Node) ReadSystemInfo() { + node.DetectOS() + node.DetectArch() + node.GetCurrentDir() + node.GetUserHomeDir() + node.GetNodeHostName() + node.CheckSystemEnvironment() +} + +// Initialize system environment +func (node *Node) SystemInit() { + utils.InfoPrintf("Start init system environment for node:%s\n", node.Name) + // Initialize + + var err error + + // node.ReadSystemInfo() // technically, this is not necessary + node.CreateTmpDir() + // defer node.CleanUpTmpDir() + + // Turn off unattended-upgrades on ubuntu + utils.InfoPrintf("Turning off automatic upgrade") + _, err = node.TurnOffAutomaticUpgrade() + 
utils.CheckErrorWithMsg(err, "Failed to turn off automatic upgrade!\n") + + // Disable swap + utils.InfoPrintf("Disabling swap") + _, err = node.ExecShellCmd("sudo swapoff -a && sudo cp /etc/fstab /etc/fstab.old") // Turn off Swap && Backup fstab file + utils.CheckErrorWithMsg(err, "Failed to disable swap!\n") + + utils.InfoPrintf("Modifying fstab") + // Modify fstab to disable swap permanently + _, err = node.ExecShellCmd("sudo sed -i 's/#\\s*\\(.*swap.*\\)/\\1/g' /etc/fstab && sudo sed -i 's/.*swap.*/# &/g' /etc/fstab") + utils.CheckErrorWithMsg(err, "Failed to dodify fstab!\n") + + // Install dependencies + utils.InfoPrintf("Installing dependencies") + err = node.InstallPackages(node.Configs.System.Dependencies) + utils.CheckErrorWithMsg(err, "Failed to install dependencies!\n") + + // Install Golang + if !node.Configs.System.GoInstalled { + // Download & Extract Golang + utils.InfoPrintf("Downloading Golang(ver %s)", node.Configs.System.GoVersion) + filePathName, err := node.DownloadToTmpDir(node.Configs.System.GoDownloadUrlTemplate, + node.Configs.System.GoVersion, + node.Configs.System.CurrentArch) + utils.CheckErrorWithMsg(err, "Failed to download Golang(ver %s)!\n", node.Configs.System.GoVersion) + utils.InfoPrintf("Extracting Golang") + _, err = node.ExecShellCmd("sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf %s", filePathName) + utils.CheckErrorWithMsg(err, "Failed to extract Golang!\n") + + // For bash + _, err = node.ExecShellCmd("echo 'export PATH=$PATH:/usr/local/go/bin' >> %s/.bashrc", node.Configs.System.UserHomeDir) + utils.CheckErrorWithMsg(err, "Failed to update PATH!\n") + // For zsh + _, err = node.LookPath("zsh") + if err != nil { + _, err = node.ExecShellCmd("echo 'export PATH=$PATH:/usr/local/go/bin' >> %s/.zshrc", node.Configs.System.UserHomeDir) + utils.CheckErrorWithMsg(err, "Failed to update PATH!\n") + } + } + + // Install containerd + if !node.Configs.System.ContainerdInstalled { + // Download containerd + 
utils.InfoPrintf("Downloading containerd(ver %s)", node.Configs.System.ContainerdVersion) + filePathName, err := node.DownloadToTmpDir( + node.Configs.System.ContainerdDownloadUrlTemplate, + node.Configs.System.ContainerdVersion, + node.Configs.System.ContainerdVersion, + node.Configs.System.CurrentArch) + utils.CheckErrorWithMsg(err, "Failed to Download containerd(ver %s)\n", node.Configs.System.ContainerdVersion) + // Extract containerd + utils.InfoPrintf("Extracting containerd") + _, err = node.ExecShellCmd("sudo tar Cxzvf /usr/local %s", filePathName) + utils.CheckErrorWithMsg(err, "Failed to extract containerd!\n") + // Start containerd via systemd + utils.InfoPrintf("Downloading systemd profile for containerd") + filePathName, err = node.DownloadToTmpDir(node.Configs.System.ContainerdSystemdProfileDownloadUrl) + utils.CheckErrorWithMsg(err, "Failed to download systemd profile for containerd!\n") + utils.InfoPrintf("Starting containerd via systemd") + _, err = node.ExecShellCmd("sudo cp %s /lib/systemd/system/ && sudo systemctl daemon-reload && sudo systemctl enable --now containerd", filePathName) + utils.CheckErrorWithMsg(err, "Failed to start containerd via systemd!\n") + } + + // Install runc + if !node.Configs.System.RuncInstalled { + // Download runc + utils.InfoPrintf("Downloading runc(ver %s)", node.Configs.System.RuncVersion) + filePathName, err := node.DownloadToTmpDir( + node.Configs.System.RuncDownloadUrlTemplate, + node.Configs.System.RuncVersion, + node.Configs.System.CurrentArch) + utils.CheckErrorWithMsg(err, "Failed to download runc(ver %s)!\n", node.Configs.System.RuncVersion) + // Install runc + utils.InfoPrintf("Installing runc") + _, err = node.ExecShellCmd("sudo install -m 755 %s /usr/local/sbin/runc", filePathName) + utils.CheckErrorWithMsg(err, "Failed to install runc!\n") + } + + // Install CNI plugins + if !node.Configs.System.CniPluginsInstalled { + utils.InfoPrintf("Downloading CNI plugins(ver %s)", 
node.Configs.System.CniPluginsVersion) + filePathName, err := node.DownloadToTmpDir( + node.Configs.System.CniPluginsDownloadUrlTemplate, + node.Configs.System.CniPluginsVersion, + node.Configs.System.CurrentArch, + node.Configs.System.CniPluginsVersion) + utils.CheckErrorWithMsg(err, "Failed to download CNI plugins(ver %s)!\n", node.Configs.System.CniPluginsVersion) + utils.InfoPrintf("Extracting CNI plugins") + _, err = node.ExecShellCmd("sudo mkdir -p /opt/cni/bin && sudo tar Cxzvf /opt/cni/bin %s", filePathName) + utils.CheckErrorWithMsg(err, "Failed to extract CNI plugins!\n") + } + + // Configure the systemd cgroup driver + utils.InfoPrintf("Configuring the systemd cgroup driver") + _, err = node.ExecShellCmd(`containerd config default > %s && + sudo mkdir -p /etc/containerd && + sudo cp %s /etc/containerd/config.toml && + sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml && + sudo systemctl restart containerd`, + node.Configs.System.TmpDir+"/config.toml", + node.Configs.System.TmpDir+"/config.toml") + utils.CheckErrorWithMsg(err, "Failed to configure the systemd cgroup driver!\n") + + // Enable IP forwading & br_netfilter + utils.InfoPrintf("Enabling IP forwading & br_netfilter") + _, err = node.ExecShellCmd(`sudo modprobe br_netfilter && sudo modprobe overlay && + sudo sysctl -w net.ipv4.ip_forward=1 && + sudo sysctl -w net.ipv4.conf.all.forwarding=1 && + sudo sysctl -w net.bridge.bridge-nf-call-iptables=1 && + sudo sysctl -w net.bridge.bridge-nf-call-ip6tables=1`) + utils.CheckErrorWithMsg(err, "Failed to enable IP forwading & br_netfilter!\n") + // Ensure Boot-Resistant + utils.InfoPrintf("Ensuring Boot-Resistant") + _, err = node.ExecShellCmd(`echo 'br_netfilter' | + sudo tee /etc/modules-load.d/netfilter.conf && + echo 'overlay' | sudo tee -a /etc/modules-load.d/netfilter.conf && + sudo sed -i 's/# *net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf && + sudo sed -i 
's/net.ipv4.ip_forward=0/net.ipv4.ip_forward=1/g' /etc/sysctl.conf && + echo 'net.bridge.bridge-nf-call-iptables=1\nnet.bridge.bridge-nf-call-ip6tables=1\nnet.ipv4.conf.all.forwarding=1' | + sudo tee /etc/sysctl.d/99-kubernetes-cri.conf`) + utils.CheckErrorWithMsg(err, "Failed to ensure Boot-Resistant!\n") + + // Install kubeadm, kubelet, kubectl + switch node.Configs.System.CurrentOS { + case "ubuntu": + // Download Google Cloud public signing key and Add the Kubernetes apt repository + utils.InfoPrintf("Adding the Kubernetes apt repository") + _, err = node.ExecShellCmd(`sudo mkdir -p -m 755 /etc/apt/keyrings && curl -fsSL %sRelease.key | + sudo gpg --batch --yes --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg && + echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] %s /' | + sudo tee /etc/apt/sources.list.d/kubernetes.list`, node.Configs.System.KubeRepoUrl, node.Configs.System.KubeRepoUrl) + utils.CheckErrorWithMsg(err, "Failed to add the Kubernetes apt repository!\n") + // Install kubeadm, kubelet, kubectl via apt + utils.InfoPrintf("Installing kubeadm, kubelet, kubectl") + err = node.InstallPackages("kubeadm=%s kubelet=%s kubectl=%s", + node.Configs.System.KubeVersion, + node.Configs.System.KubeVersion, + node.Configs.System.KubeVersion) + utils.CheckErrorWithMsg(err, "Failed to install kubeadm, kubelet, kubectl!\n") + // Lock kubeadm, kubelet, kubectl version + utils.InfoPrintf("Locking kubeadm, kubelet, kubectl version") + _, err = node.ExecShellCmd("sudo apt-mark hold kubelet kubeadm kubectl") + utils.CheckErrorWithMsg(err, "Failed to lock kubeadm, kubelet, kubectl version!\n") + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + } +} + +// Kubernetes related functions +func (node *Node) KubeMasterInit() (string, string, string, string) { + + // Initialize + var err error + node.CreateTmpDir() + + // Pre-pull Image + utils.WaitPrintf("Pre-Pulling required images") + shellCmd := 
fmt.Sprintf("sudo kubeadm config images pull --kubernetes-version %s ", node.Configs.Kube.K8sVersion) + if len(node.Configs.Kube.AlternativeImageRepo) > 0 { + shellCmd = fmt.Sprintf(shellCmd+"--image-repository %s ", node.Configs.Kube.AlternativeImageRepo) + } + _, err = node.ExecShellCmd(shellCmd) + utils.CheckErrorWithMsg(err, "Failed to pre-pull required images!\n") + + // Deploy Kubernetes + utils.WaitPrintf("Deploying Kubernetes(version %s)", node.Configs.Kube.K8sVersion) + shellCmd = fmt.Sprintf("sudo kubeadm init --kubernetes-version %s --pod-network-cidr=\"%s\" ", + node.Configs.Kube.K8sVersion, + node.Configs.Kube.PodNetworkCidr) + if len(node.Configs.Kube.AlternativeImageRepo) > 0 { + shellCmd = fmt.Sprintf(shellCmd+"--image-repository %s ", node.Configs.Kube.AlternativeImageRepo) + } + if len(node.Configs.Kube.ApiserverAdvertiseAddress) > 0 { + shellCmd = fmt.Sprintf(shellCmd+"--apiserver-advertise-address=%s ", node.Configs.Kube.ApiserverAdvertiseAddress) + } + shellCmd = fmt.Sprintf(shellCmd+"| tee %s/masterNodeInfo", node.Configs.System.TmpDir) + _, err = node.ExecShellCmd(shellCmd) + utils.CheckErrorWithMsg(err, "Failed to deploy Kubernetes(version %s)!\n", node.Configs.Kube.K8sVersion) + + // Make kubectl work for non-root user + utils.WaitPrintf("Making kubectl work for non-root user") + _, err = node.ExecShellCmd(`mkdir -p %s/.kube && + sudo cp -i /etc/kubernetes/admin.conf %s/.kube/config && sudo chown $(id -u):$(id -g) %s/.kube/config`, + node.Configs.System.UserHomeDir, + node.Configs.System.UserHomeDir, + node.Configs.System.UserHomeDir) + utils.CheckErrorWithMsg(err, "Failed to make kubectl work for non-root user!\n") + + // Install Calico network add-on + utils.WaitPrintf("Installing pod network") + _, err = node.ExecShellCmd("kubectl apply -f %s", node.Configs.Kube.PodNetworkAddonConfigURL) + utils.CheckErrorWithMsg(err, "Failed to install pod network!\n") + + // Extract master node information from logs + utils.WaitPrintf("Extracting 
master node information from logs") + shellOut, err := node.ExecShellCmd("sed -n '/.*kubeadm join.*/p' < %s/masterNodeInfo |"+ + "sed -n 's/.*join \\(.*\\):\\(\\S*\\) --token \\(\\S*\\).*/\\1 \\2 \\3/p'", node.Configs.System.TmpDir) + utils.InfoPrintf("shellOut 2: %s\n", shellOut) //DEBUG + utils.CheckErrorWithMsg(err, "Failed to extract master node information from logs!\n") + splittedOut := strings.Split(shellOut, " ") + utils.InfoPrintf("spiltOut 3: %s\n", splittedOut) //DEBUG + node.Configs.Kube.ApiserverAdvertiseAddress = splittedOut[0] + node.Configs.Kube.ApiserverPort = splittedOut[1] + node.Configs.Kube.ApiserverToken = splittedOut[2] + shellOut, err = node.ExecShellCmd("sed -n '/.*sha256:.*/p' < %s/masterNodeInfo | sed -n 's/.*\\(sha256:\\S*\\).*/\\1/p'", node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to extract master node information from logs!\n") + node.Configs.Kube.ApiserverTokenHash = shellOut + + shellData := fmt.Sprintf("echo '%s\n%s\n%s\n%s' > %s/masterNodeValues", + node.Configs.Kube.ApiserverAdvertiseAddress, + node.Configs.Kube.ApiserverPort, + node.Configs.Kube.ApiserverToken, + node.Configs.Kube.ApiserverTokenHash, + node.Configs.System.TmpDir) + _, err = node.ExecShellCmd(shellData) + utils.CheckErrorWithMsg(err, "Failed to write master node information to file!\n") + + return node.Configs.Kube.ApiserverAdvertiseAddress, + node.Configs.Kube.ApiserverPort, + node.Configs.Kube.ApiserverToken, + node.Configs.Kube.ApiserverTokenHash + +} + +func (node *Node) KubeClean() { + utils.InfoPrintf("Cleaning Kube in node: %s\n", node.Name) + var err error + if node.NodeRole == "master" { + // kubectl cordon {workerNodeName} + // kubectl drain {NodeName} --delete-local-data --force --ignore-daemonsets + // kubectl delete node {NodeName} + + utils.WaitPrintf("Reseting kube cluster and rm .kube file") + // TODO: delete master last, need to check defer can work or not + defer node.ExecShellCmd("sudo kubeadm reset -f && rm -rf $HOME/.kube 
&& rm -rf /etc/cni/net.d") + // The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d + } else { + + utils.WaitPrintf("Reseting kube cluster") + _, err = node.ExecShellCmd("sudo kubeadm reset -f && rm -rf /etc/cni/net.d") + } + utils.CheckErrorWithMsg(err, "Failed to clean kube cluster!\n") + +} + +// Join worker node to Kubernetes cluster +func (node *Node) KubeWorkerJoin(apiServerAddr string, apiServerPort string, apiServerToken string, apiServerTokenHash string) { + + // Initialize + var err error + + // Join Kubernetes cluster + utils.WaitPrintf("Joining Kubernetes cluster") + _, err = node.ExecShellCmd("sudo kubeadm join %s:%s --token %s --discovery-token-ca-cert-hash %s", + apiServerAddr, + apiServerPort, + apiServerToken, + apiServerTokenHash) + utils.CheckErrorWithMsg(err, "Failed to join Kubernetes cluster!\n") +} + +func (node *Node) GetAllNodes() []string { + utils.WaitPrintf("Get all nodes...") + if node.NodeRole != "master" { + utils.ErrorPrintf("GetAllNodes can only be executed on master node!\n") + return []string{} + } + out, err := node.ExecShellCmd("kubectl get nodes | awk 'NR>1 {print $1}'") + utils.CheckErrorWithMsg(err, "Failed to get nodes from cluster!\n") + nodeNames := strings.Split(out, "\n") + return nodeNames +} + +// Knative related functions +// Install Knative Serving +func (node *Node) InstallKnativeServing() { + node.OnlyExecByMaster() + var err error + + node.CreateTmpDir() + + // Install and configure MetalLB + utils.WaitPrintf("Installing and configuring MetalLB") + _, err = node.ExecShellCmd(`kubectl get configmap kube-proxy -n kube-system -o yaml | + sed -e "s/strictARP: false/strictARP: true/" | + kubectl apply -f - -n kube-system`) + utils.CheckErrorWithMsg(err, "Failed to apply config map MetalLB!") + _, err = node.ExecShellCmd("kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v%s/config/manifests/metallb-native.yaml", node.Configs.Knative.MetalLBVersion) + 
	utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB!")
	// Wait for the MetalLB controller rollout before applying the address
	// pool configs, which depend on its CRDs and webhook being available.
	_, err = node.ExecShellCmd("kubectl -n metallb-system wait deploy controller --timeout=600s --for=condition=Available")
	utils.CheckErrorWithMsg(err, "Failed to wait for deployment MetalLB!")
	for _, value := range node.Configs.Knative.MetalLBConfigURLArray {
		_, err = node.ExecShellCmd("kubectl apply -f %s", value)
		utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB array list!")
	}
	utils.SuccessPrintf("\n")

	// Install istio
	// Download istio
	utils.WaitPrintf("Downloading istio")
	istioFilePath, err := node.DownloadToTmpDir(node.GetIstioDownloadUrl())
	utils.CheckErrorWithMsg(err, "Failed to download istio!")
	// Extract istio
	utils.WaitPrintf("Extracting istio")
	err = node.ExtractToDir(istioFilePath, "/usr/local", true)
	utils.CheckErrorWithMsg(err, "Failed to extract istio!")
	// Update PATH
	err = node.AppendDirToPath("/usr/local/istio-%s/bin", node.Configs.Knative.IstioVersion)
	utils.CheckErrorWithMsg(err, "Failed to update PATH!")
	// Deploy istio operator
	utils.WaitPrintf("Deploying istio operator")
	operatorConfigPath, err := node.DownloadToTmpDir(node.Configs.Knative.IstioOperatorConfigUrl)
	utils.CheckErrorWithMsg(err, "Failed to download istio operator config!")
	_, err = node.ExecShellCmd("sudo /usr/local/istio-%s/bin/istioctl install -y -f %s",
		node.Configs.Knative.IstioVersion,
		operatorConfigPath)
	utils.CheckErrorWithMsg(err, "Failed to deploy istio operator!")

	// Install Knative Serving component (CRDs first, then the core).
	utils.WaitPrintf("Installing Knative Serving component")
	_, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/serving/releases/download/knative-v%s/serving-crds.yaml",
		node.Configs.Knative.KnativeVersion)
	utils.CheckErrorWithMsg(err, "Failed to install Knative Serving component!")
	_, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/serving/releases/download/knative-v%s/serving-core.yaml",
		node.Configs.Knative.KnativeVersion)
	utils.CheckErrorWithMsg(err, "Failed to install Knative Serving component!")

	// Install local cluster registry
	utils.WaitPrintf("Installing local cluster registry")
	_, err = node.ExecShellCmd("kubectl create namespace registry")
	utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!")
	configFilePath, err := node.DownloadToTmpDir("%s", node.Configs.Knative.LocalRegistryVolumeConfigUrl)
	utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!")
	// Substitute the repository volume size into the manifest via envsubst
	// before creating it.
	_, err = node.ExecShellCmd("REPO_VOL_SIZE=%s envsubst < %s | kubectl create --filename -",
		node.Configs.Knative.LocalRegistryRepoVolumeSize,
		configFilePath)
	utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!")
	_, err = node.ExecShellCmd("kubectl create -f %s && kubectl apply -f %s",
		node.Configs.Knative.LocalRegistryDockerRegistryConfigUrl,
		node.Configs.Knative.LocalRegistryHostUpdateConfigUrl)
	utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!")

	// Configure Magic DNS
	utils.WaitPrintf("Configuring Magic DNS")
	_, err = node.ExecShellCmd("kubectl apply -f %s", node.Configs.Knative.MagicDNSConfigUrl)
	utils.CheckErrorWithMsg(err, "Failed to configure Magic DNS!")

	// Install networking layer
	utils.WaitPrintf("Installing networking layer")
	_, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v%s/net-istio.yaml",
		node.Configs.Knative.KnativeVersion)
	utils.CheckErrorWithMsg(err, "Failed to install networking layer!")

	// Logs for verification
	_, err = node.ExecShellCmd("kubectl get pods -n knative-serving")
	utils.CheckErrorWithMsg(err, "Verification Failed!")

	// enable node selector so Knative services can pin pods to nodepools
	utils.WaitPrintf("Enable node selector in knative serving")
	_, err = node.ExecShellCmd(`kubectl patch cm config-features -n knative-serving \
	--type merge \
	-p '{"data":{"kubernetes.podspec-nodeselector":"enabled"}}'
`)
	utils.CheckErrorWithMsg(err, "Failed to enable node selector in knative serving")
	// node.enableNodeSelect()
}

// Install Knative Eventing
func (node *Node) InstallKnativeEventing() {
	// Install Knative Eventing component (CRDs first, then the core).
	utils.WaitPrintf("Installing Knative Eventing component")
	_, err := node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/eventing-crds.yaml", node.Configs.Knative.KnativeVersion)
	utils.CheckErrorWithMsg(err, "Failed to install Knative Eventing component!")
	_, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/eventing-core.yaml", node.Configs.Knative.KnativeVersion)
	utils.CheckErrorWithMsg(err, "Failed to install Knative Eventing component!")

	// Logs for verification
	_, err = node.ExecShellCmd("kubectl get pods -n knative-eventing")
	utils.CheckErrorWithMsg(err, "Verification Failed!")

	// Install a default Channel (messaging) layer
	utils.WaitPrintf("Installing a default Channel (messaging) layer")
	_, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/in-memory-channel.yaml", node.Configs.Knative.KnativeVersion)
	utils.CheckErrorWithMsg(err, "Failed to install a default Channel (messaging) layer!")

	// Install a Broker layer
	utils.WaitPrintf("Installing a Broker layer")
	_, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/mt-channel-broker.yaml", node.Configs.Knative.KnativeVersion)
	utils.CheckErrorWithMsg(err, "Failed to install a Broker layer!")

	// Logs for verification
	_, err = node.ExecShellCmd("kubectl --namespace istio-system get service istio-ingressgateway")
	utils.CheckErrorWithMsg(err, "Verification Failed!")
}

// get istio download URL: the template takes the istio version twice plus
// the node architecture.
func (node *Node) GetIstioDownloadUrl() string {
	knative := node.Configs.Knative
	return fmt.Sprintf(knative.IstioDownloadUrlTemplate, knative.IstioVersion, knative.IstioVersion, node.Configs.System.CurrentArch)
}

// Open yurt Related functions

// CheckYurtMasterEnvironment probes for helm and kustomize, records which
// are already installed, and selects the distro-specific dependency list
// for the OpenYurt setup.
func (node *Node) CheckYurtMasterEnvironment() {
	node.OnlyExecByMaster()
	// Check environment
	var err error
	utils.InfoPrintf("Checking system environment...\n")

	// Check Helm
	_, err = node.LookPath("helm")
	if err != nil {
		utils.WarnPrintf("Helm not found! Helm will be automatically installed!\n")
	} else {
		utils.SuccessPrintf("Helm found!\n")
		node.Configs.Yurt.HelmInstalled = true
	}

	// Check Kustomize
	_, err = node.LookPath("kustomize")
	if err != nil {
		utils.WarnPrintf("Kustomize not found! Kustomize will be automatically installed!\n")
	} else {
		utils.SuccessPrintf("Kustomize found!\n")
		node.Configs.Yurt.KustomizeInstalled = true
	}

	// Add OS-specific dependencies to installation lists
	// (only Ubuntu currently requires extra packages).
	switch node.Configs.System.CurrentOS {
	case "ubuntu":
		node.Configs.Yurt.Dependencies = "curl apt-transport-https ca-certificates build-essential git"
	case "rocky linux":
		node.Configs.Yurt.Dependencies = ""
	case "centos":
		node.Configs.Yurt.Dependencies = ""
	default:
		utils.FatalPrintf("Unsupported OS: %s\n", node.Configs.System.CurrentOS)
	}

	utils.SuccessPrintf("Finished checking system environment!\n")
}

// Initialize Openyurt on master node
func (node *Node) YurtMasterInit() {
	node.OnlyExecByMaster()
	// Initialize
	var err error
	node.CheckYurtMasterEnvironment()
	node.CreateTmpDir()
	// defer node.CleanUpTmpDir()

	// Install dependencies
	utils.WaitPrintf("Installing dependencies")
	err = node.InstallPackages(node.Configs.Yurt.Dependencies)
	utils.CheckErrorWithMsg(err, "Failed to install dependencies!\n")

	// Treat master as cloud node
	if node.Configs.Yurt.MasterAsCloud {
		utils.WarnPrintf("Master node WILL also be treated as a cloud node!\n")
node.ExecShellCmd("kubectl taint nodes --all node-role.kubernetes.io/master:NoSchedule-") + node.ExecShellCmd("kubectl taint nodes --all node-role.kubernetes.io/control-plane-") + } + + // Install helm + if !node.Configs.Yurt.HelmInstalled { + switch node.Configs.System.CurrentOS { + case "ubuntu": + // Download public signing key && Add the Helm apt repository + utils.WaitPrintf("Downloading public signing key && Add the Helm apt repository") + // Download public signing key + filePathName, err := node.DownloadToTmpDir(node.Configs.Yurt.HelmPublicSigningKeyDownloadUrl) + utils.CheckErrorWithMsg(err, "Failed to download public signing key && add the Helm apt repository!\n") + _, err = node.ExecShellCmd("sudo mkdir -p /usr/share/keyrings && cat %s | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null", filePathName) + utils.CheckErrorWithMsg(err, "Failed to download public signing key && add the Helm apt repository!\n") + // Add the Helm apt repository + _, err = node.ExecShellCmd(`echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | + sudo tee /etc/apt/sources.list.d/helm-stable-debian.list`) + utils.CheckErrorWithMsg(err, "Failed to download public signing key && add the Helm apt repository!\n") + // Install helm + utils.WaitPrintf("Installing Helm") + err = node.InstallPackages("helm") + utils.CheckErrorWithMsg(err, "Failed to install helm!\n") + default: + utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) + } + } + + // Install kustomize + if !node.Configs.Yurt.KustomizeInstalled { + // Download kustomize helper script + utils.WaitPrintf("Downloading kustomize") + filePathName, err := node.DownloadToTmpDir(node.Configs.Yurt.KustomizeScriptDownloadUrl) + utils.CheckErrorWithMsg(err, "Failed to download kustomize!\n") + // Download kustomize + _, err = node.ExecShellCmd("chmod u+x %s && %s %s", filePathName, filePathName, 
node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to download kustomize!\n") + // Install kustomize + utils.WaitPrintf("Installing kustomize") + _, err = node.ExecShellCmd("sudo cp %s /usr/local/bin", node.Configs.System.TmpDir+"/kustomize") + utils.CheckErrorWithMsg(err, "Failed to Install kustomize!\n") + } + + // Add OpenYurt repo with helm + utils.WaitPrintf("Adding OpenYurt repo(version %s) with helm", node.Configs.Yurt.YurtVersion) + _, err = node.ExecShellCmd("git clone --quiet https://github.com/openyurtio/openyurt-helm.git %s/openyurt-helm && pushd %s/openyurt-helm && git checkout openyurt-%s && popd", + node.Configs.System.TmpDir, + node.Configs.System.TmpDir, + node.Configs.Yurt.YurtVersion) + utils.CheckErrorWithMsg(err, "Failed to add OpenYurt repo with helm!\n") + + // Deploy yurt-app-manager + utils.WaitPrintf("Deploying yurt-app-manager") + _, err = node.ExecShellCmd("helm install yurt-app-manager -n kube-system %s/openyurt-helm/charts/yurt-app-manager", node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to deploy yurt-app-manager!\n") + + // Wait for yurt-app-manager to be ready + utils.WaitPrintf("Waiting for yurt-app-manager to be ready") + waitCount := 1 + for { + yurtAppManagerStatus, err := node.ExecShellCmd(`kubectl get pod -n kube-system | grep yurt-app-manager | sed -n "s/\s*\(\S*\)\s*\(\S*\)\s*\(\S*\).*/\2 \3/p"`) + utils.CheckErrorWithMsg(err, "Failed to wait for yurt-app-manager to be ready!\n") + if yurtAppManagerStatus == "1/1 Running" { + utils.SuccessPrintf("\n") + break + } else { + utils.WarnPrintf("Waiting for yurt-app-manager to be ready [%ds]\n", waitCount) + waitCount += 1 + time.Sleep(time.Second) + } + } + + // Deploy yurt-controller-manager + utils.WaitPrintf("Deploying yurt-controller-manager") + _, err = node.ExecShellCmd("helm install openyurt %s/openyurt-helm/charts/openyurt -n kube-system", node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to deploy 
yurt-controller-manager!\n") + + // Setup raven-controller-manager Component + // Clone repository + utils.WaitPrintf("Cloning repo: raven-controller-manager") + _, err = node.ExecShellCmd("git clone --quiet https://github.com/openyurtio/raven-controller-manager.git %s/raven-controller-manager", node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to clone repo: raven-controller-manager!\n") + // Deploy raven-controller-manager + utils.WaitPrintf("Deploying raven-controller-manager") + _, err = node.ExecShellCmd(`pushd %s/raven-controller-manager && + git checkout v0.3.0 && make generate-deploy-yaml && + kubectl apply -f _output/yamls/raven-controller-manager.yaml && + popd`, + node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to deploy raven-controller-manager!\n") + + // Setup raven-agent Component + // Clone repository + utils.WaitPrintf("Cloning repo: raven-agent") + _, err = node.ExecShellCmd("git clone --quiet https://github.com/openyurtio/raven.git %s/raven-agent", node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to clone repo: raven-agent!\n") + // Deploy raven-agent + utils.WaitPrintf("Deploying raven-agent") + _, err = node.ExecShellCmd("pushd %s/raven-agent && git checkout v0.3.0 && FORWARD_NODE_IP=true make deploy && popd", node.Configs.System.TmpDir) + utils.CheckErrorWithMsg(err, "Failed to deploy raven-agent!\n") +} + +// Expand Openyurt to worker node +func (node *Node) YurtMasterExpand(worker *Node) { + node.OnlyExecByMaster() + // Initialize + var err error + var workerAsEdge string + + // Label worker node as cloud/edge + utils.WaitPrintf("Labeling worker node: %s", worker.Configs.System.NodeHostName) + if worker.NodeRole == "edge" { + workerAsEdge = "true" + } else if worker.NodeRole == "cloud" { + workerAsEdge = "false" + } else { + utils.FatalPrintf("worker's role must be edge or cloud, but this node's role is %s", worker.NodeRole) + } + _, err = node.ExecShellCmd("kubectl label node %s 
openyurt.io/is-edge-worker=%s --overwrite", worker.Configs.System.NodeHostName, workerAsEdge) + utils.CheckErrorWithMsg(err, "Failed to label worker node!\n") + + // Activate the node autonomous mode + utils.WaitPrintf("Activating the node autonomous mode") + _, err = node.ExecShellCmd("kubectl annotate node %s node.beta.openyurt.io/autonomy=true --overwrite", worker.Configs.System.NodeHostName) + utils.CheckErrorWithMsg(err, "Failed to activate the node autonomous mode!\n") + + // Wait for worker node to be Ready + utils.WaitPrintf("Waiting for worker node to be ready") + waitCount := 1 + for { + workerNodeStatus, err := node.ExecShellCmd(`kubectl get nodes | sed -n "/.*%s.*/p" | + sed -n "s/\s*\(\S*\)\s*\(\S*\).*/\2/p"`, + worker.Configs.System.NodeHostName) + utils.CheckErrorWithMsg(err, "Failed to wait for worker node to be ready!\n") + if workerNodeStatus == "Ready" { + utils.SuccessPrintf("\n") + break + } else { + utils.WarnPrintf("Waiting for worker node to be ready [%ds]\n", waitCount) + waitCount += 1 + time.Sleep(time.Second) + } + } + + // Restart pods in the worker node + utils.WaitPrintf("Restarting pods in the worker node") + shellOutput, err := node.ExecShellCmd(GetRestartPodsShell(), worker.Configs.System.NodeHostName) + utils.CheckErrorWithMsg(err, "Failed to restart pods in the worker node!\n") + podsToBeRestarted := strings.Split(shellOutput, "\n") + for _, pods := range podsToBeRestarted { + podsInfo := strings.Split(pods, " ") + utils.WaitPrintf("Restarting pod: %s => %s", podsInfo[0], podsInfo[1]) + _, err = node.ExecShellCmd("kubectl -n %s delete pod %s", podsInfo[0], podsInfo[1]) + utils.CheckErrorWithMsg(err, "Failed to restart pods in the worker node!\n") + } +} + +// Join existing Kubernetes worker node to Openyurt cluster +func (node *Node) YurtWorkerJoin(addr string, port string, token string) { + + // Initialize + var err error + + // Get yurt template from github + yurtTempFilePath, _ := 
node.DownloadToTmpDir("https://raw.githubusercontent.com/vhive-serverless/vHive/openyurt/scripts/openyurt-deployer/configs/yurtTemplate.yaml") + // Set up Yurthub + utils.WaitPrintf("Setting up Yurthub") + _, err = node.ExecShellCmd( + "cat '%s' | sed -e 's|__kubernetes_master_address__|%s:%s|' -e 's|__bootstrap_token__|%s|' | sudo tee /etc/kubernetes/manifests/yurthub-ack.yaml", + yurtTempFilePath, + addr, + port, + token) + utils.CheckErrorWithMsg(err, "Failed to set up Yurthub!\n") + + // Get kubele template from github + kubletTempFilePath, _ := node.DownloadToTmpDir("https://raw.githubusercontent.com/vhive-serverless/vHive/openyurt/scripts/openyurt-deployer/configs/kubeTemplate.yaml") + // Configure Kubelet + utils.WaitPrintf("Configuring kubelet") + node.ExecShellCmd("sudo mkdir -p /var/lib/openyurt && cat '%s' | sudo tee /var/lib/openyurt/kubelet.conf", kubletTempFilePath) + utils.CheckErrorWithMsg(err, "Failed to configure kubelet!\n") + node.ExecShellCmd(`sudo sed -i "s|KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=\/etc\/kubernetes\/bootstrap-kubelet.conf\ --kubeconfig=\/etc\/kubernetes\/kubelet.conf|KUBELET_KUBECONFIG_ARGS=--kubeconfig=\/var\/lib\/openyurt\/kubelet.conf|g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf`) + utils.CheckErrorWithMsg(err, "Failed to configure kubelet!\n") + node.ExecShellCmd("sudo systemctl daemon-reload && sudo systemctl restart kubelet") + utils.CheckErrorWithMsg(err, "Failed to configure kubelet!\n") +} + +func (node *Node) YurtWorkerClean() { + node.OnlyExecByWorker() + var err error + utils.WaitPrintf("Cleaning openyurt kubelet on node:%s", node.Name) + _, err = node.ExecShellCmd("sudo rm -rf /var/lib/openyurt") + _, err = node.ExecShellCmd("sudo rm /etc/kubernetes/pki/ca.crt") + _, err = node.ExecShellCmd(`sudo sed -i "s|KUBELET_KUBECONFIG_ARGS=--kubeconfig=\/var\/lib\/openyurt\/kubelet.conf|KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=\/etc\/kubernetes\/bootstrap-kubelet.conf\ 
--kubeconfig=\/etc\/kubernetes\/kubelet.conf|g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf`) + utils.CheckErrorWithMsg(err, "Failed to clean kubelet on node: %s", node.Name) +} + +// Builds cloud and edge nodepools +func (masterNode *Node) BuildDemo(workerNodes []Node) { + + masterNode.GetUserHomeDir() + masterNode.GetNodeHostName() + + var err error + cloudPoolName := masterNode.Configs.Demo.CloudPoolName + edgePoolName := masterNode.Configs.Demo.EdgePoolName + + cloudFile := fmt.Sprintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.CloudYamlFile) + edgeFile := fmt.Sprintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.EdgeYamlFile) + // yurtFile := utils.InfoPrintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.YurtAppSetYamlFile) + + // cloud.yaml + utils.WaitPrintf("Creating yaml files for cloud nodepool") + cloudNpcommand := fmt.Sprintf("yq '.metadata.name = %s' configs/cloudNodePoolTemplate.yaml > %s ", cloudPoolName, cloudFile) + _, err = masterNode.ExecShellCmd(cloudNpcommand) + utils.CheckErrorWithTagAndMsg(err, "Failed to create yaml for cloud\n") + + // edge.yaml + utils.WaitPrintf("Creating yaml files for edge nodepool") + edgeNpcommand := fmt.Sprintf("yq '.metadata.name = %s' configs/edgeNodePoolTemplate.yaml > %s ", edgePoolName, edgeFile) + _, err = masterNode.ExecShellCmd(edgeNpcommand) + utils.CheckErrorWithTagAndMsg(err, "Failed to create yaml for edge\n") + + //label master as cloud TODO not just master, but all cloud nodes + utils.WaitPrintf("Labeling master") + _, err = masterNode.ExecShellCmd(`kubectl label node %s apps.openyurt.io/desired-nodepool=%s`, masterNode.Configs.System.NodeHostName, cloudPoolName) + utils.CheckErrorWithTagAndMsg(err, "Master Cloud label fail\n") + + //label edge + utils.WaitPrintf("Labeling workers") + for _, worker := range workerNodes { + worker.GetNodeHostName() + var desiredNpName string + if worker.NodeRole == "cloud" { + 
desiredNpName = cloudPoolName + } else { + desiredNpName = edgePoolName + } + _, err = masterNode.ExecShellCmd("kubectl label node %s apps.openyurt.io/desired-nodepool=%s", worker.Configs.System.NodeHostName, desiredNpName) + utils.CheckErrorWithTagAndMsg(err, "worker label fail\n") + } + utils.SuccessPrintf("Label success\n") + + utils.WaitPrintf("Apply cloud.yaml") + _, err = masterNode.ExecShellCmd("kubectl apply -f %s", cloudFile) + utils.CheckErrorWithTagAndMsg(err, "Failed to apply cloud.yaml\n") + + utils.WaitPrintf("Apply edge.yaml") + _, err = masterNode.ExecShellCmd("kubectl apply -f %s", edgeFile) + utils.CheckErrorWithTagAndMsg(err, "Failed to apply edge.yaml\n") +} + +func (masterNode *Node) Demo(isCloud bool) { + + masterNode.GetUserHomeDir() + masterNode.GetNodeHostName() + + var err error + cloudPoolName := masterNode.Configs.Demo.CloudPoolName + edgePoolName := masterNode.Configs.Demo.EdgePoolName + + utils.WaitPrintf("Creating benchmark's yaml file and apply it") + + if isCloud { + command := fmt.Sprintf(`yq '.metadata.name = \"helloworld-python-cloud\" | + .spec.template.spec.nodeSelector.\"apps.openyurt.io/nodepool\" = %s | + .spec.template.spec.containers[0].image = \"docker.io/vhiveease/hello-cloud:latest\" ' + configs/benchmarkTemplate.yaml > %s`, cloudPoolName, masterNode.Configs.Demo.CloudBenchYamlFile) + _, err = masterNode.ExecShellCmd(command) + utils.CheckErrorWithMsg(err, "benchmark command fail.") + _, err = masterNode.ExecShellCmd("kubectl apply -f %s", masterNode.Configs.Demo.CloudBenchYamlFile) + } else { + command := fmt.Sprintf(`yq '.metadata.name = \"helloworld-python-edge\" | + .spec.template.spec.nodeSelector.\"apps.openyurt.io/nodepool\" = %s | + .spec.template.spec.containers[0].image = \"docker.io/vhiveease/hello-edge:latest\" ' + configs/benchmarkTemplate.yaml > %s`, edgePoolName, masterNode.Configs.Demo.EdgeBenchYamlFile) + _, err = masterNode.ExecShellCmd(command) + utils.CheckErrorWithMsg(err, "benchmark command fail.") 
+ _, err = masterNode.ExecShellCmd("kubectl apply -f %s", masterNode.Configs.Demo.EdgeBenchYamlFile) + } + utils.CheckErrorWithTagAndMsg(err, "Failed to create benchmark's yaml file and apply it") + +} + +func (masterNode *Node) PrintDemoInfo(workerNodes []Node, isCloud bool) { + utils.InfoPrintf("NodePool Information:\n") + utils.InfoPrintf("+--------------------------------------------------------------------+\n") + npType := "cloud" + if !isCloud { + npType = "edge" + } + + poolName := masterNode.Configs.Demo.CloudPoolName + if !isCloud { + poolName = masterNode.Configs.Demo.EdgePoolName + } + + utils.InfoPrintf("+%s Nodepool %s:\n", npType, poolName) + utils.InfoPrintf("+Nodes:\n") + if isCloud { + utils.InfoPrintf("+\tnode: %s <- Master\n", masterNode.Configs.System.NodeHostName) + } + for _, worker := range workerNodes { + worker.GetNodeHostName() + if worker.NodeRole == npType { + utils.InfoPrintf("+\tnode: %s\n", worker.Configs.System.NodeHostName) + } + } + + shellOut, _ := masterNode.ExecShellCmd("kubectl get ksvc | grep '\\-%s' | awk '{print $1, substr($2, 8)}'", npType) + var serviceName string + var serviceURL string + splittedOut := strings.Split(shellOut, " ") + if len(splittedOut) != 2 { + serviceName = "Null" + serviceURL = "Null" + } else { + serviceName = splittedOut[0] + serviceURL = splittedOut[1] + } + utils.SuccessPrintf("+Service: Name: [%s] with URL [%s]\n", serviceName, serviceURL) + utils.InfoPrintf("+--------------------------------------------------------------------+\n") + +} diff --git a/scripts/openyurt-deployer/node/demo.go b/scripts/openyurt-deployer/node/demo.go deleted file mode 100644 index 840ff37f9..000000000 --- a/scripts/openyurt-deployer/node/demo.go +++ /dev/null @@ -1,127 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - "github.com/vhive-serverless/vHive/scripts/utils" - "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/template" -) - -// Builds cloud and edge nodepools -func (masterNode *Node) 
BuildDemo(workerNodes []Node) { - - masterNode.GetUserHomeDir() - masterNode.GetNodeHostName() - - var err error - // cloud.yaml - cloudPoolName := masterNode.Configs.Demo.CloudPoolName - edgePoolName := masterNode.Configs.Demo.EdgePoolName - - cloudFile := fmt.Sprintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.CloudYamlFile) - edgeFile := fmt.Sprintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.EdgeYamlFile) - // yurtFile := utils.InfoPrintf("%s/%s", masterNode.Configs.System.UserHomeDir, masterNode.Configs.Demo.YurtAppSetYamlFile) - - createCloudNpTemplate := template.CreateCloudNpTemplate() - utils.WaitPrintf("Creating yaml files for cloud nodepool") - _, err = masterNode.ExecShellCmd(createCloudNpTemplate, cloudPoolName, cloudFile) - utils.CheckErrorWithTagAndMsg(err, "Failed to create yaml for cloud\n") - - // edge.yaml - createEdgeNpTemplate := template.CreateEdgeNpTemplate() - utils.WaitPrintf("Creating yaml files for edge nodepool") - _, err = masterNode.ExecShellCmd(createEdgeNpTemplate, edgePoolName, edgeFile) - utils.CheckErrorWithTagAndMsg(err, "Failed to create yaml for edge\n") - - //label master as cloud TODO not just master, but all cloud nodes - utils.WaitPrintf("Labeling master") - _, err = masterNode.ExecShellCmd(`kubectl label node %s apps.openyurt.io/desired-nodepool=%s`, masterNode.Configs.System.NodeHostName, cloudPoolName) - utils.CheckErrorWithTagAndMsg(err, "Master Cloud label fail\n") - - //label edge - utils.WaitPrintf("Labeling workers") - for _, worker := range workerNodes { - worker.GetNodeHostName() - var desiredNpName string - if worker.NodeRole == "cloud" { - desiredNpName = cloudPoolName - } else { - desiredNpName = edgePoolName - } - _, err = masterNode.ExecShellCmd("kubectl label node %s apps.openyurt.io/desired-nodepool=%s", worker.Configs.System.NodeHostName, desiredNpName) - utils.CheckErrorWithTagAndMsg(err, "worker label fail\n") - } - utils.SuccessPrintf("Label 
success\n") - - utils.WaitPrintf("Apply cloud.yaml") - _, err = masterNode.ExecShellCmd("kubectl apply -f %s", cloudFile) - utils.CheckErrorWithTagAndMsg(err, "Failed to apply cloud.yaml\n") - - utils.WaitPrintf("Apply edge.yaml") - _, err = masterNode.ExecShellCmd("kubectl apply -f %s", edgeFile) - utils.CheckErrorWithTagAndMsg(err, "Failed to apply edge.yaml\n") -} - -func (masterNode *Node) Demo(isCloud bool) { - - masterNode.GetUserHomeDir() - masterNode.GetNodeHostName() - - var err error - cloudPoolName := masterNode.Configs.Demo.CloudPoolName - edgePoolName := masterNode.Configs.Demo.EdgePoolName - - utils.WaitPrintf("Creating benchmark's yaml file and apply it") - benchmarkTemplate := template.GetBenchmarkTemplate() - if isCloud { - _, err = masterNode.ExecShellCmd(benchmarkTemplate, "cloud", cloudPoolName, "cloud", masterNode.Configs.Demo.CloudBenchYamlFile) - _, err = masterNode.ExecShellCmd("kubectl apply -f %s", masterNode.Configs.Demo.CloudBenchYamlFile) - } else { - _, err = masterNode.ExecShellCmd(benchmarkTemplate, "edge", edgePoolName, "edge", masterNode.Configs.Demo.EdgeBenchYamlFile) - _, err = masterNode.ExecShellCmd("kubectl apply -f %s", masterNode.Configs.Demo.EdgeBenchYamlFile) - } - utils.CheckErrorWithTagAndMsg(err, "Failed to create benchmark's yaml file and apply it") - -} - -func (masterNode *Node) PrintDemoInfo(workerNodes []Node, isCloud bool) { - utils.InfoPrintf("NodePool Information:\n") - utils.InfoPrintf("+--------------------------------------------------------------------+\n") - npType := "cloud" - if !isCloud { - npType = "edge" - } - - poolName := masterNode.Configs.Demo.CloudPoolName - if !isCloud { - poolName = masterNode.Configs.Demo.EdgePoolName - } - - utils.InfoPrintf("+%s Nodepool %s:\n", npType, poolName) - utils.InfoPrintf("+Nodes:\n") - if isCloud { - utils.InfoPrintf("+\tnode: %s <- Master\n", masterNode.Configs.System.NodeHostName) - } - for _, worker := range workerNodes { - worker.GetNodeHostName() - if 
worker.NodeRole == npType { - utils.InfoPrintf("+\tnode: %s\n", worker.Configs.System.NodeHostName) - } - } - - shellOut, _ := masterNode.ExecShellCmd("kubectl get ksvc | grep '\\-%s' | awk '{print $1, substr($2, 8)}'", npType) - var serviceName string - var serviceURL string - splittedOut := strings.Split(shellOut, " ") - if len(splittedOut) != 2 { - serviceName = "Null" - serviceURL = "Null" - } else { - serviceName = splittedOut[0] - serviceURL = splittedOut[1] - } - utils.SuccessPrintf("+Service: Name: [%s] with URL [%s]\n", serviceName, serviceURL) - utils.InfoPrintf("+--------------------------------------------------------------------+\n") - -} diff --git a/scripts/openyurt-deployer/node/go.mod b/scripts/openyurt-deployer/node/go.mod deleted file mode 100644 index 0102c784d..000000000 --- a/scripts/openyurt-deployer/node/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/vhive-serverless/vhive/scripts/openyurt_deployer/node - -go 1.20 diff --git a/scripts/openyurt-deployer/node/knative.go b/scripts/openyurt-deployer/node/knative.go deleted file mode 100644 index 9323a530d..000000000 --- a/scripts/openyurt-deployer/node/knative.go +++ /dev/null @@ -1,129 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/vhive-serverless/vHive/scripts/utils" -) - -// Install Knative Serving -func (node *Node) InstallKnativeServing() { - node.OnlyExecByMaster() - var err error - - node.CreateTmpDir() - defer node.CleanUpTmpDir() - - // Install and configure MetalLB - utils.WaitPrintf("Installing and configuring MetalLB") - _, err = node.ExecShellCmd(`kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e "s/strictARP: false/strictARP: true/" | kubectl apply -f - -n kube-system`) - utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB!") - _, err = node.ExecShellCmd("kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v%s/config/manifests/metallb-native.yaml", node.Configs.Knative.MetalLBVersion) - 
utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB!") - _, err = node.ExecShellCmd("kubectl -n metallb-system wait deploy controller --timeout=90s --for=condition=Available") - utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB!") - for _, value := range node.Configs.Knative.MetalLBConfigURLArray { - _, err = node.ExecShellCmd("kubectl apply -f %s", value) - utils.CheckErrorWithMsg(err, "Failed to install and configure MetalLB!") - } - utils.SuccessPrintf("\n") - - // Install istio - // Download istio - utils.WaitPrintf("Downloading istio") - istioFilePath, err := node.DownloadToTmpDir(node.GetIstioDownloadUrl()) - utils.CheckErrorWithTagAndMsg(err, "Failed to download istio!") - // Extract istio - utils.WaitPrintf("Extracting istio") - err = node.ExtractToDir(istioFilePath, "/usr/local", true) - utils.CheckErrorWithTagAndMsg(err, "Failed to extract istio!") - // Update PATH - err = node.AppendDirToPath("/usr/local/istio-%s/bin", node.Configs.Knative.IstioVersion) - utils.CheckErrorWithMsg(err, "Failed to update PATH!") - // Deploy istio operator - utils.WaitPrintf("Deploying istio operator") - operatorConfigPath, err := node.DownloadToTmpDir(node.Configs.Knative.IstioOperatorConfigUrl) - utils.CheckErrorWithMsg(err, "Failed to deploy istio operator!") - _, err = node.ExecShellCmd("/usr/local/istio-%s/bin/istioctl install -y -f %s", node.Configs.Knative.IstioVersion, operatorConfigPath) - utils.CheckErrorWithTagAndMsg(err, "Failed to deploy istio operator!") - - // Install Knative Serving component - utils.WaitPrintf("Installing Knative Serving component") - _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/serving/releases/download/knative-v%s/serving-crds.yaml", node.Configs.Knative.KnativeVersion) - utils.CheckErrorWithMsg(err, "Failed to install Knative Serving component!") - _, err = node.ExecShellCmd("kubectl apply -f 
https://github.com/knative/serving/releases/download/knative-v%s/serving-core.yaml", node.Configs.Knative.KnativeVersion) - utils.CheckErrorWithTagAndMsg(err, "Failed to install Knative Serving component!") - - // Install local cluster registry - utils.WaitPrintf("Installing local cluster registry") - _, err = node.ExecShellCmd("kubectl create namespace registry") - utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!") - configFilePath, err := node.DownloadToTmpDir("%s", node.Configs.Knative.LocalRegistryVolumeConfigUrl) - utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!") - _, err = node.ExecShellCmd("REPO_VOL_SIZE=%s envsubst < %s | kubectl create --filename -", node.Configs.Knative.LocalRegistryRepoVolumeSize, configFilePath) - utils.CheckErrorWithMsg(err, "Failed to install local cluster registry!") - _, err = node.ExecShellCmd("kubectl create -f %s && kubectl apply -f %s", node.Configs.Knative.LocalRegistryDockerRegistryConfigUrl, node.Configs.Knative.LocalRegistryHostUpdateConfigUrl) - utils.CheckErrorWithTagAndMsg(err, "Failed to install local cluster registry!") - - // Configure Magic DNS - utils.WaitPrintf("Configuring Magic DNS") - _, err = node.ExecShellCmd("kubectl apply -f %s", node.Configs.Knative.MagicDNSConfigUrl) - utils.CheckErrorWithTagAndMsg(err, "Failed to configure Magic DNS!") - - // Install networking layer - utils.WaitPrintf("Installing networking layer") - _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v%s/net-istio.yaml", node.Configs.Knative.KnativeVersion) - utils.CheckErrorWithTagAndMsg(err, "Failed to install networking layer!") - - // Logs for verification - _, err = node.ExecShellCmd("kubectl get pods -n knative-serving") - utils.CheckErrorWithMsg(err, "Verification Failed!") - - // // Configure DNS - // logs.WaitPrintf("Configuring DNS") - // _, err = node.ExecShellCmd("kubectl apply -f 
https://github.com/knative/serving/releases/download/knative-v%s/serving-default-domain.yaml", node.Configs.Knative.KnativeVersion) - // logs.CheckErrorWithTagAndMsg(err, "Failed to configure DNS!") - - // enable node selector - utils.WaitPrintf("Enable node selector in knative serving") - _, err = node.ExecShellCmd(`kubectl patch cm config-features -n knative-serving \ - --type merge \ - -p '{"data":{"kubernetes.podspec-nodeselector":"enabled"}}' -`) - utils.CheckErrorWithTagAndMsg(err, "Failed to enable node selector in knative serving") - // node.enableNodeSelect() -} - -// Install Knative Eventing -func (node *Node) InstallKnativeEventing() { - // Install Knative Eventing component - utils.WaitPrintf("Installing Knative Eventing component") - _, err := node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/eventing-crds.yaml", node.Configs.Knative.KnativeVersion) - utils.CheckErrorWithMsg(err, "Failed to install Knative Eventing component!") - _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/eventing-core.yaml", node.Configs.Knative.KnativeVersion) - utils.CheckErrorWithTagAndMsg(err, "Failed to install Knative Eventing component!") - - // Logs for verification - _, err = node.ExecShellCmd("kubectl get pods -n knative-eventing") - utils.CheckErrorWithMsg(err, "Verification Failed!") - - // Install a default Channel (messaging) layer - utils.WaitPrintf("Installing a default Channel (messaging) layer") - _, err = node.ExecShellCmd("kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v%s/in-memory-channel.yaml", node.Configs.Knative.KnativeVersion) - utils.CheckErrorWithTagAndMsg(err, "Failed to install a default Channel (messaging) layer!") - - // Install a Broker layer - utils.WaitPrintf("Installing a Broker layer") - _, err = node.ExecShellCmd("kubectl apply -f 
https://github.com/knative/eventing/releases/download/knative-v%s/mt-channel-broker.yaml", node.Configs.Knative.KnativeVersion) - utils.CheckErrorWithTagAndMsg(err, "Failed to install a Broker layer!") - - // Logs for verification - _, err = node.ExecShellCmd("kubectl --namespace istio-system get service istio-ingressgateway") - utils.CheckErrorWithMsg(err, "Verification Failed!") -} - -// get istio download URL -func (node *Node) GetIstioDownloadUrl() string { - knative := node.Configs.Knative - return fmt.Sprintf(knative.IstioDownloadUrlTemplate, knative.IstioVersion, knative.IstioVersion, node.Configs.System.CurrentArch) -} diff --git a/scripts/openyurt-deployer/node/kube.go b/scripts/openyurt-deployer/node/kube.go deleted file mode 100644 index 821d135ac..000000000 --- a/scripts/openyurt-deployer/node/kube.go +++ /dev/null @@ -1,120 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - "github.com/vhive-serverless/vHive/scripts/utils" -) - -// Initialize the master node of Kubernetes cluster -func (node *Node) KubeMasterInit() (string, string, string, string) { - - // Initialize - var err error - node.check_kube_environment() - node.CreateTmpDir() - defer node.CleanUpTmpDir() - - // Pre-pull Image - utils.WaitPrintf("Pre-Pulling required images") - shellCmd := fmt.Sprintf("sudo kubeadm config images pull --kubernetes-version %s ", node.Configs.Kube.K8sVersion) - if len(node.Configs.Kube.AlternativeImageRepo) > 0 { - shellCmd = fmt.Sprintf(shellCmd+"--image-repository %s ", node.Configs.Kube.AlternativeImageRepo) - } - _, err = node.ExecShellCmd(shellCmd) - utils.CheckErrorWithTagAndMsg(err, "Failed to pre-pull required images!\n") - - // Deploy Kubernetes - utils.WaitPrintf("Deploying Kubernetes(version %s)", node.Configs.Kube.K8sVersion) - shellCmd = fmt.Sprintf("sudo kubeadm init --kubernetes-version %s --pod-network-cidr=\"%s\" ", node.Configs.Kube.K8sVersion, node.Configs.Kube.PodNetworkCidr) - if len(node.Configs.Kube.AlternativeImageRepo) > 0 { - 
shellCmd = fmt.Sprintf(shellCmd+"--image-repository %s ", node.Configs.Kube.AlternativeImageRepo) - } - if len(node.Configs.Kube.ApiserverAdvertiseAddress) > 0 { - shellCmd = fmt.Sprintf(shellCmd+"--apiserver-advertise-address=%s ", node.Configs.Kube.ApiserverAdvertiseAddress) - } - shellCmd = fmt.Sprintf(shellCmd+"| tee %s/masterNodeInfo", node.Configs.System.TmpDir) - _, err = node.ExecShellCmd(shellCmd) - utils.CheckErrorWithTagAndMsg(err, "Failed to deploy Kubernetes(version %s)!\n", node.Configs.Kube.K8sVersion) - - // Make kubectl work for non-root user - utils.WaitPrintf("Making kubectl work for non-root user") - _, err = node.ExecShellCmd("mkdir -p %s/.kube && sudo cp -i /etc/kubernetes/admin.conf %s/.kube/config && sudo chown $(id -u):$(id -g) %s/.kube/config", - node.Configs.System.UserHomeDir, - node.Configs.System.UserHomeDir, - node.Configs.System.UserHomeDir) - utils.CheckErrorWithTagAndMsg(err, "Failed to make kubectl work for non-root user!\n") - - // Install Calico network add-on - utils.WaitPrintf("Installing pod network") - _, err = node.ExecShellCmd("kubectl apply -f %s", node.Configs.Kube.PodNetworkAddonConfigURL) - utils.CheckErrorWithTagAndMsg(err, "Failed to install pod network!\n") - - // Extract master node information from logs - utils.WaitPrintf("Extracting master node information from logs") - shellOut, err := node.ExecShellCmd("sed -n '/.*kubeadm join.*/p' < %s/masterNodeInfo | sed -n 's/.*join \\(.*\\):\\(\\S*\\) --token \\(\\S*\\).*/\\1 \\2 \\3/p'", node.Configs.System.TmpDir) - utils.CheckErrorWithMsg(err, "Failed to extract master node information from logs!\n") - splittedOut := strings.Split(shellOut, " ") - node.Configs.Kube.ApiserverAdvertiseAddress = splittedOut[0] - node.Configs.Kube.ApiserverPort = splittedOut[1] - node.Configs.Kube.ApiserverToken = splittedOut[2] - shellOut, err = node.ExecShellCmd("sed -n '/.*sha256:.*/p' < %s/masterNodeInfo | sed -n 's/.*\\(sha256:\\S*\\).*/\\1/p'", node.Configs.System.TmpDir) - 
utils.CheckErrorWithTagAndMsg(err, "Failed to extract master node information from logs!\n") - node.Configs.Kube.ApiserverTokenHash = shellOut - - return node.Configs.Kube.ApiserverAdvertiseAddress, - node.Configs.Kube.ApiserverPort, - node.Configs.Kube.ApiserverToken, - node.Configs.Kube.ApiserverTokenHash - -} - -func (node *Node) KubeClean() { - utils.InfoPrintf("Cleaning Kube in node: %s\n", node.Name) - var err error - if node.NodeRole == "master" { - // kubectl cordon {workerNodeName} - // kubectl drain {NodeName} --delete-local-data --force --ignore-daemonsets - // kubectl delete node {NodeName} - - utils.WaitPrintf("Reseting kube cluster and rm .kube file") - // TODO: delete master last, need to check defer can work or not - defer node.ExecShellCmd("sudo kubeadm reset -f && rm -rf $HOME/.kube && rm -rf /etc/cni/net.d") - // The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d - } else { - - utils.WaitPrintf("Reseting kube cluster") - _, err = node.ExecShellCmd("sudo kubeadm reset -f && rm -rf /etc/cni/net.d") - } - utils.CheckErrorWithTagAndMsg(err, "Failed to clean kube cluster!\n") - -} - -// Join worker node to Kubernetes cluster -func (node *Node) KubeWorkerJoin(apiServerAddr string, apiServerPort string, apiServerToken string, apiServerTokenHash string) { - - // Initialize - var err error - - // Join Kubernetes cluster - utils.WaitPrintf("Joining Kubernetes cluster") - _, err = node.ExecShellCmd("sudo kubeadm join %s:%s --token %s --discovery-token-ca-cert-hash %s", apiServerAddr, apiServerPort, apiServerToken, apiServerTokenHash) - utils.CheckErrorWithTagAndMsg(err, "Failed to join Kubernetes cluster!\n") -} - -func (node *Node) check_kube_environment() { - // Temporarily unused -} - -func (node *Node) GetAllNodes() []string { - utils.WaitPrintf("Get all nodes...") - if node.NodeRole != "master" { - utils.ErrorPrintf("GetAllNodes can only be executed on master node!\n") - return []string{} - } - out, err := 
node.ExecShellCmd("kubectl get nodes | awk 'NR>1 {print $1}'") - utils.CheckErrorWithMsg(err, "Failed to get nodes from cluster!\n") - nodeNames := strings.Split(out, "\n") - return nodeNames -} diff --git a/scripts/openyurt-deployer/node/node.go b/scripts/openyurt-deployer/node/node.go deleted file mode 100644 index fb49a018b..000000000 --- a/scripts/openyurt-deployer/node/node.go +++ /dev/null @@ -1,51 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - "github.com/sfreiberg/simplessh" - "github.com/vhive-serverless/vHive/scripts/utils" - "github.com/vhive-serverless/vhive/scripts/openyurt_deployer/configs" -) - -type NodeConfig struct { - System configs.SystemEnvironmentStruct - Kube configs.KubeConfigStruct - Knative configs.KnativeConfigStruct - Yurt configs.YurtEnvironment - Demo configs.DemoEnvironment -} - -type Node struct { - Name string - Client *simplessh.Client - NodeRole string - Configs *NodeConfig -} - -func (node *Node) ExecShellCmd(cmd string, pars ...any) (string, error) { - shellCmd := fmt.Sprintf(cmd, pars...) 
- out, err := node.Client.Exec(shellCmd) - if err != nil { - utils.WarnPrintf("node: [%s] failed to exec: \n%s\nerror:%s\n", node.Name, shellCmd, out) - } - return strings.TrimSuffix(string(out), "\n"), err -} - -func (node *Node) OnlyExecByMaster() { - if node.NodeRole != "master" { - utils.FatalPrintf("This function can only be executed by master node!\n") - } -} - -func (node *Node) OnlyExecByWorker() { - if node.NodeRole == "master" { - utils.FatalPrintf("This function can only be executed by worker node!\n") - } -} - -func (node *Node) SetMasterAsCloud(asCloud bool) { - node.OnlyExecByMaster() - node.Configs.Yurt.MasterAsCloud = asCloud -} diff --git a/scripts/openyurt-deployer/node/system.go b/scripts/openyurt-deployer/node/system.go deleted file mode 100644 index 493df1054..000000000 --- a/scripts/openyurt-deployer/node/system.go +++ /dev/null @@ -1,368 +0,0 @@ -package node - -import ( - "fmt" - "path" - "strings" - - "github.com/vhive-serverless/vHive/scripts/utils" -) - -type ShellError struct { - msg string - exitCode int -} - -func (err *ShellError) Error() string { - return fmt.Sprintf("[exit %d] -> %s", err.exitCode, err.msg) -} - -// Detect current architecture -func (node *Node) DetectArch() { - utils.WaitPrintf("Detetcting current arch") - out, err := node.ExecShellCmd("dpkg --print-architecture") - utils.CheckErrorWithMsg(err, "Failed to get current arch!\n") - node.Configs.System.CurrentArch = out - switch node.Configs.System.CurrentArch { - default: - utils.InfoPrintf("Detected Arch: %s for node: %s\n", node.Configs.System.CurrentArch, node.Name) - } -} - -// Detect current operating system -func (node *Node) DetectOS() { - switch node.Configs.System.CurrentOS { - case "windows": - utils.FatalPrintf("Unsupported OS: %s\n", node.Configs.System.CurrentOS) - default: - var err error - node.Configs.System.CurrentOS, err = node.ExecShellCmd("sed -n 's/^NAME=\"\\(.*\\)\"/\\1/p' < /etc/os-release | head -1 | tr '[:upper:]' '[:lower:]'") - 
utils.InfoPrintf("Detected OS: %s\n", node.Configs.System.CurrentOS) - utils.CheckErrorWithMsg(err, "Failed to get Linux distribution info!\n") - switch node.Configs.System.CurrentOS { - case "ubuntu": - default: - utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) - } - utils.InfoPrintf("Detected OS: %s for node: %s\n", strings.TrimSuffix(string(node.Configs.System.CurrentOS), "\n"), node.Name) - } -} - -// Get current directory -func (node *Node) GetCurrentDir() { - var err error - node.Configs.System.CurrentDir, err = node.ExecShellCmd("pwd") - utils.CheckErrorWithMsg(err, "Failed to get get current directory!\n") -} - -// Get current home directory -func (node *Node) GetUserHomeDir() { - var err error - node.Configs.System.UserHomeDir, err = node.ExecShellCmd("echo $HOME") - utils.CheckErrorWithMsg(err, "Failed to get current home directory!\n") -} - -// Get current node's hostname -func (node *Node) GetNodeHostName() { - var err error - node.Configs.System.NodeHostName, err = node.ExecShellCmd("echo $HOSTNAME") - utils.CheckErrorWithMsg(err, "Failed to get current node hostname!\n") -} - -// Create temporary directory -func (node *Node) CreateTmpDir() { - var err error - utils.InfoPrintf("Creating temporary directory") - tmpDir := "~/yurt_tmp" - _, err = node.ExecShellCmd("mkdir -p %s", tmpDir) - node.Configs.System.TmpDir = tmpDir - utils.CheckErrorWithTagAndMsg(err, "Failed to create temporary directory!\n") -} - -// Clean up temporary directory -func (node *Node) CleanUpTmpDir() { - utils.InfoPrintf("Cleaning up temporary directory") - _, err := node.ExecShellCmd("rm -rf %s/*", node.Configs.System.TmpDir) - utils.CheckErrorWithTagAndMsg(err, "Failed to create temporary directory!\n") -} - -// Extract arhive file to specific directory(currently support .tar.gz file only) -func (node *Node) ExtractToDir(filePath string, dirPath string, privileged bool) error { - var err error - if privileged { - _, err = 
node.ExecShellCmd("sudo tar -xzvf %s -C %s", filePath, dirPath) - } else { - _, err = node.ExecShellCmd("tar -xzvf %s -C %s", filePath, dirPath) - } - return err -} - -// Append directory to PATH variable for bash & zsh -func (node *Node) AppendDirToPath(pathTemplate string, pars ...any) error { - appendedPath := fmt.Sprintf(pathTemplate, pars...) - - // For bash - _, err := node.ExecShellCmd("echo 'export PATH=$PATH:%s' >> %s/.bashrc", appendedPath, node.Configs.System.UserHomeDir) - if err != nil { - return err - } - // For zsh - _, err = node.LookPath("zsh") - if err != nil { - _, err = node.ExecShellCmd("echo 'export PATH=$PATH:%s' >> %s/.zshrc", appendedPath, node.Configs.System.UserHomeDir) - } - return err -} - -// Turn off unattended-upgrades -func (node *Node) TurnOffAutomaticUpgrade() (string, error) { - switch node.Configs.System.CurrentOS { - case "ubuntu": - _, err := node.ExecShellCmd("stat /etc/apt/apt.conf.d/20auto-upgrades") - if err == nil { - return node.ExecShellCmd("sudo sed -i 's/\"1\"/\"0\"/g' /etc/apt/apt.conf.d/20auto-upgrades") - } - return "", nil - default: - return "", nil - } -} - -// Install packages on various OS -func (node *Node) InstallPackages(packagesTemplate string, pars ...any) error { - packages := fmt.Sprintf(packagesTemplate, pars...) 
- switch node.Configs.System.CurrentOS { - case "ubuntu": - _, err := node.ExecShellCmd("sudo apt-get -qq update && sudo apt-get -qq install -y --allow-downgrades --allow-change-held-packages %s", packages) - return err - case "centos": - _, err := node.ExecShellCmd("sudo dnf -y -q install %s", packages) - return err - case "rocky linux": - _, err := node.ExecShellCmd("sudo dnf -y -q install %s", packages) - return err - default: - utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) - return &ShellError{msg: "Unsupported Linux distribution", exitCode: 1} - } -} - -// Download file to temporary directory (absolute path of downloaded file will be the first return value if successful) -func (node *Node) DownloadToTmpDir(urlTemplate string, pars ...any) (string, error) { - url := fmt.Sprintf(urlTemplate, pars...) - fileName := path.Base(url) - filePath := node.Configs.System.TmpDir + "/" + fileName - _, err := node.ExecShellCmd("curl -sSL --output %s %s", filePath, url) - return filePath, err -} - -func (node *Node) LookPath(path string) (string, error) { - return node.ExecShellCmd("command -v %s", path) -} - -// Check system environment -func (node *Node) CheckSystemEnvironment() { - // Check system environment - utils.InfoPrintf("Checking system environment...\n") - var err error - - // Check Golang - _, err = node.LookPath("go") - if err != nil { - utils.InfoPrintf("Golang not found! Golang(version %s) will be automatically installed!\n", node.Configs.System.GoVersion) - } else { - utils.InfoPrintf("Golang found!\n") - node.Configs.System.GoInstalled = true - } - - // Check Containerd - _, err = node.LookPath("containerd") - if err != nil { - utils.InfoPrintf("Containerd not found! 
containerd(version %s) will be automatically installed!\n", node.Configs.System.ContainerdVersion) - } else { - utils.InfoPrintf("Containerd found!\n") - node.Configs.System.ContainerdInstalled = true - } - - // Check runc - _, err = node.LookPath("runc") - if err != nil { - utils.InfoPrintf("runc not found! runc(version %s) will be automatically installed!\n", node.Configs.System.RuncVersion) - } else { - utils.InfoPrintf("runc found!\n") - node.Configs.System.RuncInstalled = true - } - - // Check CNI plugins - _, err = node.ExecShellCmd("stat /opt/cni/bin") - if err != nil { - utils.InfoPrintf("CNI plugins not found! CNI plugins(version %s) will be automatically installed!\n", node.Configs.System.CniPluginsVersion) - } else { - utils.InfoPrintf("CNI plugins found!\n") - node.Configs.System.CniPluginsInstalled = true - } - - // Add OS-specific dependencies to installation lists - switch node.Configs.System.CurrentOS { - case "ubuntu": - node.Configs.System.Dependencies = "git wget curl build-essential apt-transport-https ca-certificates" - case "rocky linux": - node.Configs.System.Dependencies = "" - case "centos": - node.Configs.System.Dependencies = "" - default: - utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) - } - - utils.InfoPrintf("Finish checking system environment!\n") -} - -func (node *Node) ReadSystemInfo() { - node.DetectOS() - node.DetectArch() - node.GetCurrentDir() - node.GetUserHomeDir() - node.GetNodeHostName() - node.CheckSystemEnvironment() -} - -// Initialize system environment -func (node *Node) SystemInit() { - utils.InfoPrintf("Start init system environment for node:%s\n", node.Name) - // Initialize - - var err error - - // node.ReadSystemInfo() // technically, this is not necessary - node.CreateTmpDir() - defer node.CleanUpTmpDir() - - // Turn off unattended-upgrades on ubuntu - utils.InfoPrintf("Turning off automatic upgrade") - _, err = node.TurnOffAutomaticUpgrade() - 
utils.CheckErrorWithTagAndMsg(err, "Failed to turn off automatic upgrade!\n") - - // Disable swap - utils.InfoPrintf("Disabling swap") - _, err = node.ExecShellCmd("sudo swapoff -a && sudo cp /etc/fstab /etc/fstab.old") // Turn off Swap && Backup fstab file - utils.CheckErrorWithTagAndMsg(err, "Failed to disable swap!\n") - - utils.InfoPrintf("Modifying fstab") - // Modify fstab to disable swap permanently - _, err = node.ExecShellCmd("sudo sed -i 's/#\\s*\\(.*swap.*\\)/\\1/g' /etc/fstab && sudo sed -i 's/.*swap.*/# &/g' /etc/fstab") - utils.CheckErrorWithTagAndMsg(err, "Failed to dodify fstab!\n") - - // Install dependencies - utils.InfoPrintf("Installing dependencies") - err = node.InstallPackages(node.Configs.System.Dependencies) - utils.CheckErrorWithTagAndMsg(err, "Failed to install dependencies!\n") - - // Install Golang - if !node.Configs.System.GoInstalled { - // Download & Extract Golang - utils.InfoPrintf("Downloading Golang(ver %s)", node.Configs.System.GoVersion) - filePathName, err := node.DownloadToTmpDir(node.Configs.System.GoDownloadUrlTemplate, node.Configs.System.GoVersion, node.Configs.System.CurrentArch) - utils.CheckErrorWithTagAndMsg(err, "Failed to download Golang(ver %s)!\n", node.Configs.System.GoVersion) - utils.InfoPrintf("Extracting Golang") - _, err = node.ExecShellCmd("sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf %s", filePathName) - utils.CheckErrorWithTagAndMsg(err, "Failed to extract Golang!\n") - - // For bash - _, err = node.ExecShellCmd("echo 'export PATH=$PATH:/usr/local/go/bin' >> %s/.bashrc", node.Configs.System.UserHomeDir) - utils.CheckErrorWithMsg(err, "Failed to update PATH!\n") - // For zsh - _, err = node.LookPath("zsh") - if err != nil { - _, err = node.ExecShellCmd("echo 'export PATH=$PATH:/usr/local/go/bin' >> %s/.zshrc", node.Configs.System.UserHomeDir) - utils.CheckErrorWithMsg(err, "Failed to update PATH!\n") - } - } - - // Install containerd - if !node.Configs.System.ContainerdInstalled { - // Download 
containerd - utils.InfoPrintf("Downloading containerd(ver %s)", node.Configs.System.ContainerdVersion) - filePathName, err := node.DownloadToTmpDir( - node.Configs.System.ContainerdDownloadUrlTemplate, - node.Configs.System.ContainerdVersion, - node.Configs.System.ContainerdVersion, - node.Configs.System.CurrentArch) - utils.CheckErrorWithTagAndMsg(err, "Failed to Download containerd(ver %s)\n", node.Configs.System.ContainerdVersion) - // Extract containerd - utils.InfoPrintf("Extracting containerd") - _, err = node.ExecShellCmd("sudo tar Cxzvf /usr/local %s", filePathName) - utils.CheckErrorWithTagAndMsg(err, "Failed to extract containerd!\n") - // Start containerd via systemd - utils.InfoPrintf("Downloading systemd profile for containerd") - filePathName, err = node.DownloadToTmpDir(node.Configs.System.ContainerdSystemdProfileDownloadUrl) - utils.CheckErrorWithTagAndMsg(err, "Failed to download systemd profile for containerd!\n") - utils.InfoPrintf("Starting containerd via systemd") - _, err = node.ExecShellCmd("sudo cp %s /lib/systemd/system/ && sudo systemctl daemon-reload && sudo systemctl enable --now containerd", filePathName) - utils.CheckErrorWithTagAndMsg(err, "Failed to start containerd via systemd!\n") - } - - // Install runc - if !node.Configs.System.RuncInstalled { - // Download runc - utils.InfoPrintf("Downloading runc(ver %s)", node.Configs.System.RuncVersion) - filePathName, err := node.DownloadToTmpDir( - node.Configs.System.RuncDownloadUrlTemplate, - node.Configs.System.RuncVersion, - node.Configs.System.CurrentArch) - utils.CheckErrorWithTagAndMsg(err, "Failed to download runc(ver %s)!\n", node.Configs.System.RuncVersion) - // Install runc - utils.InfoPrintf("Installing runc") - _, err = node.ExecShellCmd("sudo install -m 755 %s /usr/local/sbin/runc", filePathName) - utils.CheckErrorWithTagAndMsg(err, "Failed to install runc!\n") - } - - // Install CNI plugins - if !node.Configs.System.CniPluginsInstalled { - utils.InfoPrintf("Downloading CNI 
plugins(ver %s)", node.Configs.System.CniPluginsVersion) - filePathName, err := node.DownloadToTmpDir( - node.Configs.System.CniPluginsDownloadUrlTemplate, - node.Configs.System.CniPluginsVersion, - node.Configs.System.CurrentArch, - node.Configs.System.CniPluginsVersion) - utils.CheckErrorWithTagAndMsg(err, "Failed to download CNI plugins(ver %s)!\n", node.Configs.System.CniPluginsVersion) - utils.InfoPrintf("Extracting CNI plugins") - _, err = node.ExecShellCmd("sudo mkdir -p /opt/cni/bin && sudo tar Cxzvf /opt/cni/bin %s", filePathName) - utils.CheckErrorWithTagAndMsg(err, "Failed to extract CNI plugins!\n") - } - - // Configure the systemd cgroup driver - utils.InfoPrintf("Configuring the systemd cgroup driver") - _, err = node.ExecShellCmd( - "containerd config default > %s && sudo mkdir -p /etc/containerd && sudo cp %s /etc/containerd/config.toml && sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml && sudo systemctl restart containerd", - node.Configs.System.TmpDir+"/config.toml", - node.Configs.System.TmpDir+"/config.toml") - utils.CheckErrorWithTagAndMsg(err, "Failed to configure the systemd cgroup driver!\n") - - // Enable IP forwading & br_netfilter - utils.InfoPrintf("Enabling IP forwading & br_netfilter") - _, err = node.ExecShellCmd("sudo modprobe br_netfilter && sudo modprobe overlay && sudo sysctl -w net.ipv4.ip_forward=1 && sudo sysctl -w net.ipv4.conf.all.forwarding=1 && sudo sysctl -w net.bridge.bridge-nf-call-iptables=1 && sudo sysctl -w net.bridge.bridge-nf-call-ip6tables=1") - utils.CheckErrorWithTagAndMsg(err, "Failed to enable IP forwading & br_netfilter!\n") - // Ensure Boot-Resistant - utils.InfoPrintf("Ensuring Boot-Resistant") - _, err = node.ExecShellCmd("echo 'br_netfilter' | sudo tee /etc/modules-load.d/netfilter.conf && echo 'overlay' | sudo tee -a /etc/modules-load.d/netfilter.conf && sudo sed -i 's/# *net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf && sudo sed -i 
's/net.ipv4.ip_forward=0/net.ipv4.ip_forward=1/g' /etc/sysctl.conf && echo 'net.bridge.bridge-nf-call-iptables=1\nnet.bridge.bridge-nf-call-ip6tables=1\nnet.ipv4.conf.all.forwarding=1' | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf") - utils.CheckErrorWithTagAndMsg(err, "Failed to ensure Boot-Resistant!\n") - - // Install kubeadm, kubelet, kubectl - switch node.Configs.System.CurrentOS { - case "ubuntu": - // Download Google Cloud public signing key and Add the Kubernetes apt repository - utils.InfoPrintf("Adding the Kubernetes apt repository") - _, err = node.ExecShellCmd("sudo mkdir -p /etc/apt/keyrings && sudo curl -fsSLo /etc/apt/keyrings/kubernetes-archive-keyring.gpg https://dl.k8s.io/apt/doc/apt-key.gpg && echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list") - utils.CheckErrorWithTagAndMsg(err, "Failed to add the Kubernetes apt repository!\n") - // Install kubeadm, kubelet, kubectl via apt - utils.InfoPrintf("Installing kubeadm, kubelet, kubectl") - err = node.InstallPackages("kubeadm=%s kubelet=%s kubectl=%s", node.Configs.System.KubeVersion, node.Configs.System.KubeVersion, node.Configs.System.KubeVersion) - utils.CheckErrorWithTagAndMsg(err, "Failed to install kubeadm, kubelet, kubectl!\n") - // Lock kubeadm, kubelet, kubectl version - utils.InfoPrintf("Locking kubeadm, kubelet, kubectl version") - _, err = node.ExecShellCmd("sudo apt-mark hold kubelet kubeadm kubectl") - utils.CheckErrorWithTagAndMsg(err, "Failed to lock kubeadm, kubelet, kubectl version!\n") - default: - utils.FatalPrintf("Unsupported Linux distribution: %s\n", node.Configs.System.CurrentOS) - } -} diff --git a/scripts/openyurt-deployer/node_test.go b/scripts/openyurt-deployer/node_test.go new file mode 100644 index 000000000..b13ad1d0a --- /dev/null +++ b/scripts/openyurt-deployer/node_test.go @@ -0,0 +1,122 @@ +// MIT License +// +// Copyright (c) 2023 Jason Chua, 
Ruiqi Lai and vHive team +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package main + +import ( + "testing" +) + +func TestDetectArchofVM(t *testing.T) { + // Call the method to be tested + mockNode.DetectArch() + + if mockNode.Configs.System.CurrentArch == "" { + t.Errorf("Expected CurrentArch not supposed to be empty.\n") + } + t.Logf("CurrentArch: %v", mockNode.Configs.System.CurrentArch) +} + +func TestDetectOSofVM(t *testing.T) { + // Call the method to be tested + mockNode.DetectOS() + + if mockNode.Configs.System.CurrentOS == "" { + t.Errorf("Expected CurrentOS not supposed to be empty.\n") + } + t.Logf("CurrentOS: %v", mockNode.Configs.System.CurrentOS) +} + +func TestGetCurrentDir(t *testing.T) { + // Call the method to be tested + mockNode.GetCurrentDir() + + if mockNode.Configs.System.CurrentDir == "" { + t.Errorf("Expected CurrentDir not supposed to be empty.\n") + } + t.Logf("CurrentDir: %v", mockNode.Configs.System.CurrentDir) +} + +func TestGetUserHomeDir(t *testing.T) { + // Call the method to be tested + mockNode.GetUserHomeDir() + + if mockNode.Configs.System.UserHomeDir == "" { + t.Errorf("Expected UserHomeDir not supposed to be empty.\n") + } + t.Logf("UserHomeDir: %v", mockNode.Configs.System.UserHomeDir) +} + +func TestGetNodeHostName(t *testing.T) { + // Call the method to be tested + mockNode.GetNodeHostName() + + if mockNode.Configs.System.NodeHostName == "" { + t.Errorf("Expected NodeHostName not supposed to be empty.\n") + } + t.Logf("NodeHostName: %v", mockNode.Configs.System.NodeHostName) +} + +func TestCreateTmpDir(t *testing.T) { + // Call the method to be tested + mockNode.CreateTmpDir() + + result, _ := mockNode.ExecShellCmd("ls | grep yurt_tmp") + + if result != "yurt_tmp" { + t.Errorf("Temp file creation test fail.\n") + } + t.Logf("Result: %v", result) +} + +func TestExtractingToTargetDir(t *testing.T) { + // Create the mock tar.gz file + mockNode.ExecShellCmd("mkdir $HOME/projects/ $HOME/temp/ && cd projects && touch mockFile-1 mockFile-2 && cd ..") + mockNode.ExecShellCmd("tar -czvf 
projects.tar.gz -C projects .") + + // Create tmp dir to extract + mockNode.ExtractToDir("projects.tar.gz", "$HOME/temp", false) + + result, _ := mockNode.ExecShellCmd("ls temp | wc -l") + t.Logf("Result for file count: %v", result) + + // Remove mock file and tmp dir + mockNode.ExecShellCmd("rm -rf $HOME/projects/ $HOME/temp/ projects.tar.gz") + + if result != "2" { + t.Errorf("Expected file is 2, returned value is %s.\n", result) + } + +} + +func TestDownloadingToTmpDir(t *testing.T) { + + mockNode.Configs.System.TmpDir = "~/mockDir" + + mockNode.ExecShellCmd("mkdir %s", mockNode.Configs.System.TmpDir) + + filePath, _ := mockNode.DownloadToTmpDir("https://go.dev/dl/go1.21.5.linux-arm64.tar.gz") + + mockNode.ExecShellCmd("rm -rf %s", mockNode.Configs.System.TmpDir) + + t.Logf("FilePath returned: %s", filePath) +} diff --git a/scripts/openyurt-deployer/openyurt_deployer b/scripts/openyurt-deployer/openyurt_deployer deleted file mode 100755 index 6d05d8931..000000000 Binary files a/scripts/openyurt-deployer/openyurt_deployer and /dev/null differ diff --git a/scripts/openyurt-deployer/template/shellTemplate.go b/scripts/openyurt-deployer/template.go similarity index 71% rename from scripts/openyurt-deployer/template/shellTemplate.go rename to scripts/openyurt-deployer/template.go index 5fd995a86..d9fa6e2e5 100644 --- a/scripts/openyurt-deployer/template/shellTemplate.go +++ b/scripts/openyurt-deployer/template.go @@ -1,4 +1,4 @@ -package template +package main const ( restartPodsShellTemplate = `existingPods=$(kubectl get pod -A -o wide | grep %s) @@ -15,6 +15,12 @@ done <<< ${existingPods} IFS=${originalIFS}` ) +var vHiveConfigsURL = "https://raw.githubusercontent.com/anshalshukla/vHive/release-1.9/configs" + func GetRestartPodsShell() string { return restartPodsShellTemplate } + +func GetNetworkAddonConfigURL() string { + return vHiveConfigsURL + "/calico/canal.yaml" +} diff --git a/scripts/openyurt-deployer/template/benchmarkTemplate.go 
b/scripts/openyurt-deployer/template/benchmarkTemplate.go deleted file mode 100644 index d362117bd..000000000 --- a/scripts/openyurt-deployer/template/benchmarkTemplate.go +++ /dev/null @@ -1,24 +0,0 @@ -package template - -// adopted from vSwarm[https://github.com/vhive-serverless/vSwarm/blob/main/benchmarks/aes/yamls/knative/kn-aes-python.yaml] -const ( - benchmarkTemplate = `echo 'apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: helloworld-python-%s - namespace: default -spec: - template: - spec: - nodeSelector: - apps.openyurt.io/nodepool: %s - containers: - - image: docker.io/vhiveease/hello-%s:latest - ports: - - name: h2c - containerPort: 50000' > %s` -) - -func GetBenchmarkTemplate() string { - return benchmarkTemplate -} diff --git a/scripts/openyurt-deployer/template/cloudNodePoolTemplate.go b/scripts/openyurt-deployer/template/cloudNodePoolTemplate.go deleted file mode 100644 index 90d255146..000000000 --- a/scripts/openyurt-deployer/template/cloudNodePoolTemplate.go +++ /dev/null @@ -1,14 +0,0 @@ -package template - -const ( - cloudTemplate = `echo 'apiVersion: apps.openyurt.io/v1beta1 -kind: NodePool -metadata: - name: %s -spec: - type: Cloud' > %s` -) - -func CreateCloudNpTemplate() string { - return cloudTemplate -} diff --git a/scripts/openyurt-deployer/template/edgeNodePoolTemplate.go b/scripts/openyurt-deployer/template/edgeNodePoolTemplate.go deleted file mode 100644 index 6f7a7c337..000000000 --- a/scripts/openyurt-deployer/template/edgeNodePoolTemplate.go +++ /dev/null @@ -1,14 +0,0 @@ -package template - -const ( - edgeTemplate = `echo 'apiVersion: apps.openyurt.io/v1beta1 -kind: NodePool -metadata: - name: %s -spec: - type: Edge' > %s` -) - -func CreateEdgeNpTemplate() string { - return edgeTemplate -} diff --git a/scripts/openyurt-deployer/template/go.mod b/scripts/openyurt-deployer/template/go.mod deleted file mode 100644 index 8d6ec5fe1..000000000 --- a/scripts/openyurt-deployer/template/go.mod +++ /dev/null @@ -1,3 
+0,0 @@ -module github.com/vhive-serverless/vhive/scripts/openyurt_deployer/template - -go 1.20 \ No newline at end of file diff --git a/scripts/openyurt-deployer/template/template.go b/scripts/openyurt-deployer/template/template.go deleted file mode 100644 index 1c73d9cc5..000000000 --- a/scripts/openyurt-deployer/template/template.go +++ /dev/null @@ -1,3 +0,0 @@ -package template - -var vHiveConfigsURL = "https://raw.githubusercontent.com/anshalshukla/vHive/release-1.9/configs" diff --git a/scripts/openyurt-deployer/test_configs.go b/scripts/openyurt-deployer/test_configs.go new file mode 100644 index 000000000..269ea5f31 --- /dev/null +++ b/scripts/openyurt-deployer/test_configs.go @@ -0,0 +1,31 @@ +package main + +// criteria table for testing ParsingNodeDependencyVersion +var criteriaTable = map[string]string{ + "Golang": "1.19.10", + "containerd": "1.6.18", + "runc": "1.1.4", + "CNI": "1.2.0", +} + +// mock node info +var mockNodesInfo = NodesInfo{ + Master: "runner@127.0.0.1", +} + +// data for github runner to ssh +var githubRunner = "runner@127.0.0.1" + +// mock node data structure +var mockNode = Node{ + Name: githubRunner, + Client: SetupSSHConn(githubRunner), + NodeRole: "master", + Configs: &NodeConfig{ + System: System, + Kube: Kube, + Knative: Knative, + Yurt: Yurt, + Demo: Demo, + }, +} diff --git a/scripts/openyurt-deployer/unit_test_workflow.sh b/scripts/openyurt-deployer/unit_test_workflow.sh new file mode 100644 index 000000000..be118798a --- /dev/null +++ b/scripts/openyurt-deployer/unit_test_workflow.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +echo "PermitRootLogin=yes" | sudo tee -a /etc/ssh/sshd_config + +sudo apt-get update -qq +sudo apt-get install -qq -y openssh-server +sudo service ssh start +eval "$(ssh-agent -s)" + +ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -N "" > /dev/null +cat > ~/.ssh/config <<EOF +Host 127.0.0.1 +  Hostname 127.0.0.1 +  User runner +  IdentityFile ~/.ssh/id_rsa +  StrictHostKeyChecking no +EOF + +cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys + +sudo service ssh restart + +# add private key to ssh agent +ssh-add ~/.ssh/id_rsa + +chmod og-rw ~/.ssh + +go test 
-timeout 5m