ci: add more slo controllers e2e test (#1688)
Signed-off-by: saintube <[email protected]>
saintube authored Oct 9, 2023
1 parent a317f1e commit 77f919d
Showing 13 changed files with 524 additions and 90 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/e2e-k8s-1.22.yaml
@@ -55,7 +55,7 @@ jobs:
run: |
set -ex
kubectl cluster-info
IMG=koordinator-sh/koord-manager:e2e-${GITHUB_RUN_ID} ./hack/deploy_kind.sh
IMG=koordinator-sh/koord-manager:e2e-${GITHUB_RUN_ID} KUBERNETES_VERSION="1.22" ./hack/deploy_kind.sh
NODES=$(kubectl get node | wc -l)
for ((i=1;i<10;i++));
do
@@ -88,7 +88,7 @@ jobs:
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
EXTRA_ARGS="-koordinator-component-namespace=${COMPONENT_NS} -allowed-not-ready-nodes=1 -system-pods-startup-timeout=10s"
EXTRA_ARGS="-koordinator-component-namespace=${COMPONENT_NS} -allowed-not-ready-nodes=1 -system-pods-startup-timeout=10s -e2e-verify-service-account=false"
./bin/ginkgo -timeout 60m -v --focus='slo-controller' test/e2e -- ${EXTRA_ARGS}
retVal=$?
restartCount=$(kubectl get pod -n ${COMPONENT_NS} -l koord-app=koord-manager --no-headers | head -n 1 | awk '{print $4}')
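
To reproduce this job outside CI, the two changes above can be exercised against a local kind cluster. A minimal sketch, assuming a kind cluster is already running and that COMPONENT_NS resolves to koordinator-system (the workflow defines it outside the hunks shown here; the image tag is illustrative):

    # Deploy koord-manager into the kind cluster with the pinned Kubernetes version.
    IMG=koordinator-sh/koord-manager:e2e-local KUBERNETES_VERSION="1.22" ./hack/deploy_kind.sh

    # Run the focused slo-controller suite. The new -e2e-verify-service-account=false
    # skips the framework's default service-account check, which may not hold on a
    # freshly created kind cluster.
    make ginkgo
    COMPONENT_NS=koordinator-system
    EXTRA_ARGS="-koordinator-component-namespace=${COMPONENT_NS} -allowed-not-ready-nodes=1 -system-pods-startup-timeout=10s -e2e-verify-service-account=false"
    ./bin/ginkgo -timeout 60m -v --focus='slo-controller' test/e2e -- ${EXTRA_ARGS}
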
2 changes: 1 addition & 1 deletion .github/workflows/e2e-k8s-1.24.yaml
@@ -88,7 +88,7 @@ jobs:
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
EXTRA_ARGS="-koordinator-component-namespace=${COMPONENT_NS} -allowed-not-ready-nodes=1 -system-pods-startup-timeout=10s"
EXTRA_ARGS="-koordinator-component-namespace=${COMPONENT_NS} -allowed-not-ready-nodes=1 -system-pods-startup-timeout=10s -e2e-verify-service-account=false"
./bin/ginkgo -timeout 60m -v --focus='slo-controller' test/e2e -- ${EXTRA_ARGS}
retVal=$?
restartCount=$(kubectl get pod -n ${COMPONENT_NS} -l koord-app=koord-manager --no-headers | head -n 1 | awk '{print $4}')
2 changes: 1 addition & 1 deletion .github/workflows/e2e-k8s-latest.yaml
@@ -86,7 +86,7 @@ jobs:
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
EXTRA_ARGS="-koordinator-component-namespace=${COMPONENT_NS} -allowed-not-ready-nodes=1 -system-pods-startup-timeout=10s"
EXTRA_ARGS="-koordinator-component-namespace=${COMPONENT_NS} -allowed-not-ready-nodes=1 -system-pods-startup-timeout=10s -e2e-verify-service-account=false"
./bin/ginkgo -timeout 60m -v --focus='slo-controller' test/e2e -- ${EXTRA_ARGS}
retVal=$?
restartCount=$(kubectl get pod -n ${COMPONENT_NS} -l koord-app=koord-manager --no-headers | head -n 1 | awk '{print $4}')
2 changes: 1 addition & 1 deletion config/manager/manager.yaml
@@ -48,7 +48,7 @@ spec:
- --leader-election-namespace=koordinator-system
- --config-namespace=koordinator-system
- --v=4
- --feature-gates=AllAlpha=false,AllBeta=false
- --feature-gates=
- --sync-period=0
command:
- /koord-manager
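
Dropping the explicit AllAlpha=false,AllBeta=false leaves the gates at their built-in defaults. The empty feature-gates= value also matches the sed pattern that hack/deploy_kind.sh (further down in this commit) uses to inject version-specific gates into the scheduler manifest; that substitution only yields a well-formed flag when the list starts out empty, so the manager manifest presumably adopts the same empty-by-default convention. A minimal illustration of the rewrite, with echo standing in for the manifest:

    # The sed pattern from hack/deploy_kind.sh applied to an empty gate list:
    echo '- --feature-gates=' | sed 's/feature-gates=/feature-gates=CompatibleCSIStorageCapacity=true/g'
    # prints: - --feature-gates=CompatibleCSIStorageCapacity=true
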
2 changes: 1 addition & 1 deletion config/webhook/manifests.yaml
@@ -123,7 +123,7 @@ webhooks:
name: webhook-service
namespace: system
path: /validate-node
failurePolicy: Fail
failurePolicy: Ignore
name: vnode.koordinator.sh
rules:
- apiGroups:
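
With failurePolicy: Ignore, the API server admits Node creates and updates even when the vnode webhook endpoint is unreachable, for example while koord-manager is still starting; with Fail, those requests would be rejected, which can wedge an e2e cluster whose nodes register before the webhook is ready. A rough way to confirm the effective policy on a running cluster:

    # List the failure policy of each registered validating webhook;
    # look for the entry whose name is vnode.koordinator.sh.
    kubectl get validatingwebhookconfigurations -o yaml | grep -nE 'failurePolicy|name: vnode'
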
17 changes: 15 additions & 2 deletions hack/deploy_kind.sh
@@ -21,11 +21,24 @@ if [ -z "$IMG" ]; then
exit 1
fi

K8S_VERSION=""
if [ -z "$KUBERNETES_VERSION" ]; then
K8S_VERSION="latest"
else
K8S_VERSION=$KUBERNETES_VERSION
fi

set -e

make kustomize
KUSTOMIZE=$(pwd)/bin/kustomize
(cd config/manager && "${KUSTOMIZE}" edit set image manager="${IMG}")
"${KUSTOMIZE}" build config/default | sed -e 's/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g' > /tmp/koordinator-kustomization.yaml
echo -e "resources:\n- manager.yaml" > config/manager/kustomization.yaml

if [[ "$K8S_VERSION" == "1.22" ]]; then
sed "s/feature-gates=/feature-gates=CompatibleCSIStorageCapacity=true/g" $(pwd)/config/manager/scheduler.yaml > /tmp/scheduler.yaml && mv /tmp/scheduler.yaml $(pwd)/config/manager/scheduler.yaml
$(pwd)/hack/kustomize.sh "${KUSTOMIZE}" | sed -e 's/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g' > /tmp/koordinator-kustomization.yaml
else
$(pwd)/hack/kustomize.sh "${KUSTOMIZE}" | sed -e 's/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g' > /tmp/koordinator-kustomization.yaml
fi

kubectl apply -f /tmp/koordinator-kustomization.yaml
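
The new default handling is the long-hand form of standard shell parameter expansion, and both branches of the if/else pipe hack/kustomize.sh through the same sed; only the 1.22 branch first rewrites the scheduler manifest's feature gates. A condensed equivalent, for reference:

    # Same behavior as the if/else above: unset or empty falls back to latest.
    K8S_VERSION="${KUBERNETES_VERSION:-latest}"

    # Invocation as used by the updated 1.22 workflow earlier in this commit:
    IMG=koordinator-sh/koord-manager:e2e-${GITHUB_RUN_ID} KUBERNETES_VERSION="1.22" ./hack/deploy_kind.sh
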
2 changes: 1 addition & 1 deletion pkg/webhook/node/validating/webhooks.go
@@ -20,7 +20,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// +kubebuilder:webhook:path=/validate-node,mutating=false,failurePolicy=fail,sideEffects=None,groups="",resources=nodes,verbs=create;update,versions=v1,name=vnode.koordinator.sh,admissionReviewVersions=v1;v1beta1
// +kubebuilder:webhook:path=/validate-node,mutating=false,failurePolicy=ignore,sideEffects=None,groups="",resources=nodes,verbs=create;update,versions=v1,name=vnode.koordinator.sh,admissionReviewVersions=v1;v1beta1

var (
// HandlerMap contains admission webhook handlers
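
The kubebuilder marker above is the source from which config/webhook/manifests.yaml is generated, which is why the two files change together in this commit. Assuming the repo follows the standard kubebuilder Makefile layout (the target name is an assumption, not shown in this diff), regenerating keeps them in sync:

    # Regenerate webhook and CRD manifests after editing +kubebuilder markers.
    make manifests
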
39 changes: 21 additions & 18 deletions test/e2e/framework/test_context.go
@@ -53,24 +53,24 @@ const (
// into the code which uses the settings.
//
// The recommendation for those settings is:
// - They are stored in their own context structure or local
// variables.
// - The standard `flag` package is used to register them.
// The flag name should follow the pattern <part1>.<part2>....<partn>
// where the prefix is unlikely to conflict with other tests or
// standard packages and each part is in lower camel case. For
// example, test/e2e/storage/csi/context.go could define
// storage.csi.numIterations.
// - framework/config can be used to simplify the registration of
// multiple options with a single function call:
// var storageCSI {
// NumIterations `default:"1" usage:"number of iterations"`
// }
// _ config.AddOptions(&storageCSI, "storage.csi")
// - The direct use of Viper in tests is possible, but discouraged because
// it only works in test suites which use Viper (which is not
// required) and the supported options cannot be
// discovered by a test suite user.
// - They are stored in their own context structure or local
// variables.
// - The standard `flag` package is used to register them.
// The flag name should follow the pattern <part1>.<part2>....<partn>
// where the prefix is unlikely to conflict with other tests or
// standard packages and each part is in lower camel case. For
// example, test/e2e/storage/csi/context.go could define
// storage.csi.numIterations.
// - framework/config can be used to simplify the registration of
// multiple options with a single function call:
// var storageCSI {
// NumIterations `default:"1" usage:"number of iterations"`
// }
// _ config.AddOptions(&storageCSI, "storage.csi")
// - The direct use of Viper in tests is possible, but discouraged because
// it only works in test suites which use Viper (which is not
// required) and the supported options cannot be
// discovered by a test suite user.
//
// Test suite authors can use framework/viper to make all command line
// parameters also configurable via a configuration file.
@@ -193,6 +193,8 @@ type TestContextType struct {
KoordinatorComponentNamespace string
// SLOCtrlConfigMap is the name of the slo-controller configmap.
SLOCtrlConfigMap string
// KoordSchedulerName is the SchedulerName of the koord-scheduler.
KoordSchedulerName string
}

// NodeKillerConfig describes configuration of NodeKiller -- a utility to
@@ -335,6 +337,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
// koordinator configs
flags.StringVar(&TestContext.KoordinatorComponentNamespace, "koordinator-component-namespace", "koordinator-system", "The namespace of the koordinator components deployed to.")
flags.StringVar(&TestContext.SLOCtrlConfigMap, "slo-config-name", "slo-controller-config", "The name of the slo-controller configmap.")
flags.StringVar(&TestContext.KoordSchedulerName, "koord-scheduler-name", "koord-scheduler", "The SchedulerName of the koord-scheduler.")
}

// RegisterClusterFlags registers flags specific to the cluster e2e test suite.
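
Like the existing koordinator flags registered here, the new -koord-scheduler-name option is passed to the suite after the -- separator, as in the workflow files above. A hypothetical run against a scheduler deployed under a non-default name (both flag values below are illustrative):

    ./bin/ginkgo -v --focus='NodeNUMAResource' test/e2e -- \
      -koordinator-component-namespace=koordinator-system \
      -koord-scheduler-name=my-koord-scheduler
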
39 changes: 20 additions & 19 deletions test/e2e/scheduling/nodenumaresource.go
@@ -46,9 +46,10 @@ import (

var _ = SIGDescribe("NodeNUMAResource", func() {
f := framework.NewDefaultFramework("nodenumaresource")
var koordSchedulerName string

ginkgo.BeforeEach(func() {

koordSchedulerName = framework.TestContext.KoordSchedulerName
})

framework.KoordinatorDescribe("NodeNUMAResource CPUBindPolicy", func() {
@@ -164,7 +165,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
},
},
PriorityClassName: string(extension.PriorityProd),
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
})
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "unable schedule the lowest priority pod")
expectPodBoundReservation(f.ClientSet, f.KoordinatorClientSet, pod.Namespace, pod.Name, reservation.Name)
@@ -248,7 +249,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
},
NodeName: reservation.Status.NodeName,
PriorityClassName: string(extension.PriorityProd),
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
})
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "unable schedule the lowest priority pod")
expectPodBoundReservation(f.ClientSet, f.KoordinatorClientSet, pod.Namespace, pod.Name, reservation.Name)
@@ -416,7 +417,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
},
}
@@ -454,7 +455,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
ginkgo.By("Wait for Pod schedule failed")
@@ -502,7 +503,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
},
}
@@ -545,7 +546,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
ginkgo.By("Wait for Pod schedule failed")
@@ -590,7 +591,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
},
}
@@ -633,7 +634,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
ginkgo.By("Wait for Pod schedule failed")
@@ -674,7 +675,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
}
@@ -689,7 +690,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
}
@@ -711,7 +712,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
ginkgo.By("Wait for Pod schedule failed")
@@ -757,7 +758,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
},
}
@@ -794,7 +795,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
nodes = sets.NewInt()
@@ -836,7 +837,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
}
@@ -851,7 +852,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
}
@@ -873,7 +874,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
nodes := sets.NewInt()
@@ -920,7 +921,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
},
}
@@ -957,7 +958,7 @@ var _ = SIGDescribe("NodeNUMAResource", func() {
Limits: requests,
Requests: requests,
},
SchedulerName: "koord-scheduler",
SchedulerName: koordSchedulerName,
NodeName: node.Name,
})
nodes = sets.NewInt()
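
The hunks above are one mechanical substitution applied throughout the file: every hard-coded SchedulerName: "koord-scheduler" becomes SchedulerName: koordSchedulerName, so the NodeNUMAResource tests follow the -koord-scheduler-name flag registered in test_context.go rather than assuming the default scheduler name.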