From 969b8e70ddc6c11577a01f1583d1f6112eb1fde7 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Wed, 23 Jun 2021 13:15:03 +0200 Subject: [PATCH] Test all CNI in CI --- .dockerignore | 1 - .drone.yml | 55 +++++++++++++++++++++++++++++++++++++++++++++ dind/dind.go | 1 + scripts/integration | 21 ++++++++++++----- 4 files changed, 71 insertions(+), 7 deletions(-) diff --git a/.dockerignore b/.dockerignore index 6e43c2a99..70e5bcb52 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,3 @@ -./bin ./.dapper ./dist ./.trash-cache diff --git a/.drone.yml b/.drone.yml index 7108b914a..274d68c04 100644 --- a/.drone.yml +++ b/.drone.yml @@ -20,6 +20,7 @@ steps: - name: socket path: /var/run/docker.sock + - name: stage-binaries pull: default image: rancher/dapper:1.11.2 @@ -77,3 +78,57 @@ volumes: - name: socket host: path: /var/run/docker.sock + +--- +kind: pipeline +name: test-cni + +platform: + os: linux + arch: amd64 + +steps: +- name: build + pull: default + image: rancher/dapper:1.11.2 + commands: + - dapper build + privileged: true + volumes: + - name: socket + path: /var/run/docker.sock + +- name: integration-flannel + pull: default + image: rancher/dapper:1.11.2 + commands: + - dapper integration flannel + privileged: true + volumes: + - name: socket + path: /var/run/docker.sock + +- name: integration-calico + pull: default + image: rancher/dapper:1.11.2 + commands: + - dapper integration calico + privileged: true + volumes: + - name: socket + path: /var/run/docker.sock + +- name: integration-weave + pull: default + image: rancher/dapper:1.11.2 + commands: + - dapper integration weave + privileged: true + volumes: + - name: socket + path: /var/run/docker.sock + +volumes: +- name: socket + host: + path: /var/run/docker.sock diff --git a/dind/dind.go b/dind/dind.go index 83b86b41f..5a0fd50b3 100644 --- a/dind/dind.go +++ b/dind/dind.go @@ -47,6 +47,7 @@ func StartUpDindContainer(ctx context.Context, dindAddress, dindNetwork, dindSto } binds := []string{ fmt.Sprintf("/var/lib/kubelet-%s:/var/lib/kubelet:shared", containerName), + "/etc/machine-id:/etc/machine-id:ro", } isLink, err := util.IsSymlink("/etc/resolv.conf") if err != nil { diff --git a/scripts/integration b/scripts/integration index 489b81fc5..dfc974abe 100755 --- a/scripts/integration +++ b/scripts/integration @@ -1,4 +1,5 @@ #!/bin/bash +NETWORK_PLUGIN=$1 function kubectlinstall { k8sversion=$1 @@ -46,7 +47,6 @@ source $(dirname $0)/version cd $(dirname $0)/.. - # Get latest version from rke all_versions=$(./bin/rke --quiet config --all --list-version | sort -V) @@ -69,7 +69,7 @@ for ver in "${!versions_to_test[@]}"; do echo_with_time "Testing version ${version_to_test}" # Create cluster yaml with random node names - node=$(cat /dev/urandom | tr -dc a-z | head -c${1:-8}) + node=$(cat /dev/urandom | tr -dc a-z | head -c8) cat << EOF > "./bin/cluster-${version_to_test}.yml" kubernetes_version: ${version_to_test} nodes: @@ -78,6 +78,11 @@ nodes: user: ubuntu EOF + if [ "x${NETWORK_PLUGIN}" != "x" ]; then + echo_with_time "Network plugin specified: ${NETWORK_PLUGIN}" + echo -e "network:\n plugin: ${NETWORK_PLUGIN}" >> ./bin/cluster-${version_to_test}.yml + fi + # Run rke - output to logs and track results. ./bin/rke up --dind --config "./bin/cluster-${version_to_test}.yml" 2>&1 >"./bin/cluster-${version_to_test}.log" & pids="$pids $!" @@ -125,18 +130,20 @@ for pid in "${!pid_results[@]}"; do if [ $? 
-ne 0 ]; then echo_with_time "[FAIL] Rollout of $kind $name in namespace $namespace for ${pid_to_version} did not complete in 5 minutes" rollout_results["${pid}"]="1" - /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' - /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" get pods --all-namespaces else echo_with_time "[OK] Rollout of $kind $name in namespace $namespace for ${pid_to_version} complete" rollout_results["${pid}"]="0" fi done <<<$(/usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" -n $namespace get deploy,daemonset --no-headers -o custom-columns=NAME:.metadata.name,KIND:.kind --no-headers) done + /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' + /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" get pods --all-namespaces + /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort -u else rkeup_results["${pid}"]="1" /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" get pods --all-namespaces + /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_to_version}.yml" get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort -u fi done @@ -232,18 +239,20 @@ for pid in "${!pid_upgrade_results[@]}"; do if [ $? 
-ne 0 ]; then echo_with_time "[FAIL] Rollout of $kind $name in namespace $namespace for upgrade ${pid_upgrade_to_version} to "${upgraded_version}" did not complete in 5 minutes" rollout_upgrade_results["${pid}"]="1" - /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_upgrade_to_version}.yml" get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' - /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_upgrade_to_version}.yml" get pods --all-namespaces else echo_with_time "[OK] Rollout of $kind $name in namespace $namespace for upgrade ${pid_upgrade_to_version} to "${upgraded_version}" complete" rollout_upgrade_results["${pid}"]="0" fi done <<<$(/usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_upgrade_to_version}.yml" -n $namespace get deploy,daemonset --no-headers -o custom-columns=NAME:.metadata.name,KIND:.kind --no-headers) done + /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_upgrade_to_version}.yml" get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' + /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_upgrade_to_version}.yml" get pods --all-namespaces + /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pid_upgrade_to_version}.yml" get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort -u else rkeup_upgrade_results["${pid}"]="1" /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pids_to_version["${pid}"]}.yml" get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pids_to_version["${pid}"]}.yml" get pods --all-namespaces + /usr/local/bin/kubectl-${clusterk8sversion} --kubeconfig "./bin/kube_config_cluster-${pids_to_version["${pid}"]}.yml" get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort -u fi done
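
Notes on the new CNI coverage (illustrative, not part of the diff): each `integration-<plugin>` step in the `test-cni` pipeline runs `dapper integration <plugin>`, and `scripts/integration` now picks that plugin name up as `$1` (`NETWORK_PLUGIN`). When it is set, the script appends a network block to the generated cluster config, so the calico step, for example, provisions from a file that ends with:

    # appended by the `echo -e "network:\n plugin: ${NETWORK_PLUGIN}"` above
    network:
     plugin: calico

Without the argument the block is omitted and RKE falls back to its default plugin (canal). The read-only /etc/machine-id bind mount added in dind/dind.go exposes a machine identity inside the dind nodes, which weave in particular reads when naming its peers.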
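
The kubectl diagnostics that previously ran only when a rollout failed now run once per cluster after the rollout loop, and the new jsonpath query dumps the deduplicated list of container images, which makes it easy to confirm from the CI log that the requested CNI images were actually deployed. A rough sketch of filtering that list for one plugin (plain `kubectl`, the `<version>` placeholder, and the trailing grep are illustrative; the jsonpath/tr/sort pipeline is the one used in the script):

    kubectl --kubeconfig ./bin/kube_config_cluster-<version>.yml get pods --all-namespaces \
      -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort -u | grep -i calico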