diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml
index a1b065c..fffeddd 100644
--- a/.github/workflows/workflow.yml
+++ b/.github/workflows/workflow.yml
@@ -50,7 +50,7 @@ jobs:
   e2e-test:
     name: "E2E Test"
     runs-on: ubuntu-latest
-    timeout-minutes: 10
+    timeout-minutes: 15
     steps:
       - name: Set up Go 1.17
         uses: actions/setup-go@v2
diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
index e885e69..55c4096 100644
--- a/test/e2e/main_test.go
+++ b/test/e2e/main_test.go
@@ -44,6 +44,7 @@ func TestMain(m *testing.M) {
 	os.Exit(testenv.Run(m))
 }
 
+// deploySchedulerManifest deploys the placement policy scheduler manifest.
 func deploySchedulerManifest() env.Func {
 	return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
 		wd, err := os.Getwd()
diff --git a/test/e2e/plugin_cosched_combinations_test.go b/test/e2e/plugin_cosched_combinations_test.go
new file mode 100644
index 0000000..68d1046
--- /dev/null
+++ b/test/e2e/plugin_cosched_combinations_test.go
@@ -0,0 +1,104 @@
+//go:build e2e
+// +build e2e
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"sigs.k8s.io/e2e-framework/klient/k8s"
+	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
+	"sigs.k8s.io/e2e-framework/klient/wait"
+	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
+	"sigs.k8s.io/e2e-framework/pkg/envconf"
+	"sigs.k8s.io/e2e-framework/pkg/features"
+)
+
+func TestMustStrictCoscheduling(t *testing.T) {
+	deploymentFeat := features.New("Test Must Strict placement policy with the Coscheduling plugin").
+		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
+			// deploy placement policy config
+			if err := deploySchedulerConfig(cfg.KubeconfigFile(), cfg.Namespace(), "examples", "v1alpha1_placementpolicy_strict_must.yml"); err != nil {
+				t.Error("Failed to deploy placement policy config", err)
+			}
+
+			labels := map[string]string{
+				"app":                              "nginx",
+				"pod-group.scheduling.sigs.k8s.io": "nginx",
+			}
+
+			wd, err := os.Getwd()
+			if err != nil {
+				t.Error(err)
+			}
+			pluginsResourceAbsolutePath, err := filepath.Abs(filepath.Join(wd, "plugins/coscheduling"))
+			if err != nil {
+				t.Error(err)
+			}
+
+			// deploy the Coscheduling plugin config
+			if err := KubectlApply(cfg.KubeconfigFile(), "scheduler-plugins", []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "")}); err != nil {
+				t.Error("Failed to deploy coscheduling config", err)
+			}
+
+			// deploy a sample statefulset
+			statefulset := newStatefulSet(cfg.Namespace(), "statefulset-test", 6, labels)
+			if err := cfg.Client().Resources().Create(ctx, statefulset); err != nil {
+				t.Error("Failed to create statefulset", err)
+			}
+
+			return ctx
+		}).
+ Assess("Pods successfully assigned to the right nodes with Must Strict and Coscheduling plugins option", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Error("Failed to create new client", err) + } + resultStatefulset := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "statefulset-test", Namespace: cfg.Namespace()}, + } + + if err := wait.For(conditions.New(client.Resources()).ResourceMatch(&resultStatefulset, func(object k8s.Object) bool { + s := object.(*appsv1.StatefulSet) + return s.Status.ReadyReplicas == 3 + }), wait.WithTimeout(time.Minute*2)); err != nil { + t.Error("Failed to deploy a statefulset", err) + } + + var pods corev1.PodList + if err := client.Resources().List(ctx, &pods, resources.WithLabelSelector(labels.FormatLabels(map[string]string{"app": "nginx", "pod-group.scheduling.sigs.k8s.io": "nginx"}))); err != nil { + t.Error("cannot get list of pods", err) + } + + for i := range pods.Items { + if pods.Items[i].Spec.NodeName != "placement-policy-worker3" { + continue + } else { + t.Error("pods assigned to the wrong node", err) + } + } + + return context.WithValue(ctx, "statefulset-test", &resultStatefulset) + }). + Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Error("failed to create new Client", err) + } + dep := ctx.Value("statefulset-test").(*appsv1.StatefulSet) + if err := client.Resources().Delete(ctx, dep); err != nil { + t.Error("failed to delete Statefulset", err) + } + return ctx + }).Feature() + testenv.Test(t, deploymentFeat) +} diff --git a/test/e2e/plugins/coscheduling/configmap.yaml b/test/e2e/plugins/coscheduling/configmap.yaml new file mode 100644 index 0000000..cc10259 --- /dev/null +++ b/test/e2e/plugins/coscheduling/configmap.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: scheduler-config + namespace: scheduler-plugins +data: + scheduler-config.yaml: | + apiVersion: kubescheduler.config.k8s.io/v1beta1 + kind: KubeSchedulerConfiguration + leaderElection: + leaderElect: false + profiles: + - schedulerName: scheduler-plugins-scheduler + plugins: + queueSort: + enabled: + - name: Coscheduling + disabled: + - name: "*" + preFilter: + enabled: + - name: Coscheduling + permit: + enabled: + - name: Coscheduling + reserve: + enabled: + - name: Coscheduling + postBind: + enabled: + - name: Coscheduling + pluginConfig: + - name: Coscheduling + args: + permitWaitingTimeSeconds: 10 + deniedPGExpirationTimeSeconds: 3 diff --git a/test/e2e/plugins/coscheduling/cosched.yaml b/test/e2e/plugins/coscheduling/cosched.yaml new file mode 100644 index 0000000..01d28db --- /dev/null +++ b/test/e2e/plugins/coscheduling/cosched.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.sigs.k8s.io/v1alpha1 +kind: PodGroup +metadata: + name: nginx +spec: + scheduleTimeoutSeconds: 10 + minMember: 3 diff --git a/test/e2e/plugins/coscheduling/deployment.yaml b/test/e2e/plugins/coscheduling/deployment.yaml new file mode 100644 index 0000000..91b7ef6 --- /dev/null +++ b/test/e2e/plugins/coscheduling/deployment.yaml @@ -0,0 +1,74 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: scheduler-plugins-controller + namespace: scheduler-plugins + labels: + app: scheduler-plugins-controller +spec: + replicas: 1 + selector: + matchLabels: + app: scheduler-plugins-controller + template: + metadata: + labels: + app: scheduler-plugins-controller + spec: + serviceAccount: 
+      containers:
+      - name: scheduler-plugins-controller
+        image: k8s.gcr.io/scheduler-plugins/controller:v0.20.10
+        imagePullPolicy: IfNotPresent
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    component: scheduler
+  name: scheduler-plugins-scheduler
+  namespace: scheduler-plugins
+spec:
+  selector:
+    matchLabels:
+      component: scheduler
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        component: scheduler
+    spec:
+      serviceAccountName: scheduler-plugins-scheduler
+      containers:
+      - command:
+        - /bin/kube-scheduler
+        - --address=0.0.0.0
+        - --leader-elect=false
+        - --config=/etc/kubernetes/scheduler-config.yaml
+        - --scheduler-name=scheduler-plugins-scheduler
+        image: k8s.gcr.io/scheduler-plugins/kube-scheduler:v0.20.10
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10251
+          initialDelaySeconds: 15
+        name: scheduler-plugins-scheduler
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10251
+        resources:
+          requests:
+            cpu: '0.1'
+        securityContext:
+          privileged: false
+        volumeMounts:
+        - name: scheduler-config
+          mountPath: /etc/kubernetes
+          readOnly: true
+      hostNetwork: false
+      hostPID: false
+      volumes:
+      - name: scheduler-config
+        configMap:
+          name: scheduler-config
diff --git a/test/e2e/plugins/coscheduling/rbac.yaml b/test/e2e/plugins/coscheduling/rbac.yaml
new file mode 100644
index 0000000..e8bf69e
--- /dev/null
+++ b/test/e2e/plugins/coscheduling/rbac.yaml
@@ -0,0 +1,100 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: scheduler-plugins-scheduler
+rules:
+- apiGroups: ["", "events.k8s.io"]
+  resources: ["events"]
+  verbs: ["create", "patch", "update"]
+- apiGroups: ["coordination.k8s.io"]
+  resources: ["leases"]
+  verbs: ["create"]
+- apiGroups: ["coordination.k8s.io"]
+  resourceNames: ["kube-scheduler"]
+  resources: ["leases"]
+  verbs: ["get", "update"]
+- apiGroups: [""]
+  resources: ["endpoints"]
+  verbs: ["create"]
+- apiGroups: [""]
+  resourceNames: ["kube-scheduler"]
+  resources: ["endpoints"]
+  verbs: ["get", "update"]
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["delete", "get", "list", "watch"]
+- apiGroups: [""]
+  resources: ["bindings", "pods/binding"]
+  verbs: ["create"]
+- apiGroups: [""]
+  resources: ["pods/status"]
+  verbs: ["patch", "update"]
+- apiGroups: [""]
+  resources: ["replicationcontrollers", "services"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["apps", "extensions"]
+  resources: ["replicasets"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["apps"]
+  resources: ["statefulsets"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["policy"]
+  resources: ["poddisruptionbudgets"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources: ["persistentvolumeclaims", "persistentvolumes"]
+  verbs: ["get", "list", "watch", "patch", "update"]
+- apiGroups: ["authentication.k8s.io"]
+  resources: ["tokenreviews"]
+  verbs: ["create"]
+- apiGroups: ["authorization.k8s.io"]
+  resources: ["subjectaccessreviews"]
+  verbs: ["create"]
+- apiGroups: ["storage.k8s.io"]
+  resources: ["csinodes", "storageclasses"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["scheduling.sigs.k8s.io"]
+  resources: ["podgroups", "elasticquotas"]
+  verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: scheduler-plugins-scheduler
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: scheduler-plugins-scheduler
+subjects:
+- kind: ServiceAccount
+  name: scheduler-plugins-scheduler
+  namespace: scheduler-plugins
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: scheduler-plugins-controller
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["scheduling.sigs.k8s.io"]
+  resources: ["podgroups", "elasticquotas"]
+  verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: scheduler-plugins-controller
+subjects:
+- kind: ServiceAccount
+  name: scheduler-plugins-controller
+  namespace: scheduler-plugins
+roleRef:
+  kind: ClusterRole
+  name: scheduler-plugins-controller
+  apiGroup: rbac.authorization.k8s.io
+
diff --git a/test/e2e/plugins/coscheduling/scheduling.sigs.k8s.io_podgroups.yaml b/test/e2e/plugins/coscheduling/scheduling.sigs.k8s.io_podgroups.yaml
new file mode 100644
index 0000000..0ef92c7
--- /dev/null
+++ b/test/e2e/plugins/coscheduling/scheduling.sigs.k8s.io_podgroups.yaml
@@ -0,0 +1,106 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    api-approved.kubernetes.io: https://github.com/kubernetes-sigs/scheduler-plugins/pull/50 # edited manually
+    controller-gen.kubebuilder.io/version: v0.6.2
+  creationTimestamp: null
+  name: podgroups.scheduling.sigs.k8s.io
+spec:
+  group: scheduling.sigs.k8s.io
+  names:
+    kind: PodGroup
+    listKind: PodGroupList
+    plural: podgroups
+    singular: podgroup
+    shortNames: # edited manually
+    - pg # edited manually
+    - pgs # edited manually
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: PodGroup is a collection of Pod; used for batch workload.
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: Specification of the desired behavior of the pod group.
+            properties:
+              minMember:
+                description: MinMember defines the minimal number of members/tasks
+                  to run the pod group; if there's not enough resources to start all
+                  tasks, the scheduler will not start anyone.
+                format: int32
+                type: integer
+              minResources:
+                additionalProperties:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
+                description: MinResources defines the minimal resource of members/tasks
+                  to run the pod group; if there's not enough resources to start all
+                  tasks, the scheduler will not start anyone.
+                type: object
+              scheduleTimeoutSeconds:
+                description: ScheduleTimeoutSeconds defines the maximal time of members/tasks
+                  to wait before run the pod group;
+                format: int32
+                type: integer
+            type: object
+          status:
+            description: Status represents the current information about a pod group.
+              This data may not be up to date.
+            properties:
+              failed:
+                description: The number of pods which reached phase Failed.
+                format: int32
+                type: integer
+              occupiedBy:
+                description: OccupiedBy marks the workload (e.g., deployment, statefulset)
+                  UID that occupy the podgroup. It is empty if not initialized.
+                type: string
+              phase:
+                description: Current phase of PodGroup.
+                type: string
+              running:
+                description: The number of actively running pods.
+                format: int32
+                type: integer
+              scheduleStartTime:
+                description: ScheduleStartTime of the group
+                format: date-time
+                type: string
+              scheduled:
+                description: The number of actively running pods.
+                format: int32
+                type: integer
+              succeeded:
+                description: The number of pods which reached phase Succeeded.
+                format: int32
+                type: integer
+            type: object
+        type: object
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/test/e2e/plugins/coscheduling/serviceaccount.yaml b/test/e2e/plugins/coscheduling/serviceaccount.yaml
new file mode 100644
index 0000000..1c89890
--- /dev/null
+++ b/test/e2e/plugins/coscheduling/serviceaccount.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: scheduler-plugins
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: scheduler-plugins-scheduler
+  namespace: scheduler-plugins
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: scheduler-plugins-controller
+  namespace: scheduler-plugins
diff --git a/test/e2e/utils.go b/test/e2e/utils.go
index ba60f18..e285a1c 100644
--- a/test/e2e/utils.go
+++ b/test/e2e/utils.go
@@ -6,6 +6,8 @@ package e2e
 import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
@@ -44,7 +46,60 @@ func newDeployment(namespace, name string, replicas int32, labels map[string]str
 	}
 }
 
+func newNodeResourceDeployment(namespace, name string, replicas int32, labels map[string]string) *appsv1.Deployment {
+	resList := map[v1.ResourceName]string{
+		v1.ResourceCPU:        "1",
+		v1.ResourceMemory:     "0",
+		"example.com/deviceA": "1",
+		"example.com/deviceB": "1",
+	}
+	res := v1.ResourceList{}
+	for k, v := range resList {
+		res[k] = resource.MustParse(v)
+	}
+	return &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels:    labels,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{Labels: labels},
+				Spec: corev1.PodSpec{
+					SchedulerName: schedulerName,
+					Containers: []corev1.Container{
+						{
+							Name:            "test-deployment",
+							Image:           e2epod.GetDefaultTestImage(),
+							ImagePullPolicy: corev1.PullIfNotPresent,
+							Command:         []string{"/bin/sleep", "10000"},
+							Resources: corev1.ResourceRequirements{
+								Limits:   res,
+								Requests: res,
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
 func newStatefulSet(namespace, name string, replicas int32, labels map[string]string) *appsv1.StatefulSet {
+	resList := map[v1.ResourceName]string{
+		v1.ResourceCPU:    "3000m",
+		v1.ResourceMemory: "300Mi",
+	}
+	res := v1.ResourceList{}
+	for k, v := range resList {
+		res[k] = resource.MustParse(v)
+	}
+
 	return &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
@@ -66,6 +121,10 @@ func newStatefulSet(namespace, name string, replicas int32, labels map[string]st
 							Image:           e2epod.GetDefaultTestImage(),
 							ImagePullPolicy: corev1.PullIfNotPresent,
 							Command:         []string{"/bin/sleep", "10000"},
+							Resources: corev1.ResourceRequirements{
+								Limits:   res,
+								Requests: res,
+							},
 						},
 					},
 				},
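
For reference, a sketch of how the new coscheduling e2e test can be run locally, assuming the test cluster, CRDs, and scheduler manifests are already provisioned by the existing e2e setup; the -tags flag matches the //go:build e2e constraint in the test files, and the timeout mirrors the CI budget raised to 15 minutes above:

    go test -v -tags e2e -timeout 15m -run TestMustStrictCoscheduling ./test/e2e/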