From 7e93d95c3e56557f6676572085062a61c165ee21 Mon Sep 17 00:00:00 2001
From: Heba Elayoty
Date: Mon, 10 Jan 2022 11:39:44 -0800
Subject: [PATCH 1/3] e2e with other plugins

---
 test/e2e/main_test.go                         |   1 +
 test/e2e/plugin_cosched_combinations_test.go  | 104 ++++++++
 .../plugin_noderesource_combinations_test.go  | 101 ++++++++
 .../coscheduling/cosched-deployment.yaml      |   7 +
 .../plugins/coscheduling/coscheduling.yaml    | 224 ++++++++++++++++++
 .../noderesources-deployment.yaml             |  95 ++++++++
 .../plugins/noderesources/noderesources.yaml  | 166 +++++++++++++
 test/e2e/utils.go                             |  59 +++++
 8 files changed, 757 insertions(+)
 create mode 100644 test/e2e/plugin_cosched_combinations_test.go
 create mode 100644 test/e2e/plugin_noderesource_combinations_test.go
 create mode 100644 test/e2e/plugins/coscheduling/cosched-deployment.yaml
 create mode 100644 test/e2e/plugins/coscheduling/coscheduling.yaml
 create mode 100644 test/e2e/plugins/noderesources/noderesources-deployment.yaml
 create mode 100644 test/e2e/plugins/noderesources/noderesources.yaml

diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
index e885e69..55c4096 100644
--- a/test/e2e/main_test.go
+++ b/test/e2e/main_test.go
@@ -44,6 +44,7 @@ func TestMain(m *testing.M) {
 	os.Exit(testenv.Run(m))
 }
 
+// deploySchedulerManifest deploys the placement policy scheduler manifest.
 func deploySchedulerManifest() env.Func {
 	return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
 		wd, err := os.Getwd()
diff --git a/test/e2e/plugin_cosched_combinations_test.go b/test/e2e/plugin_cosched_combinations_test.go
new file mode 100644
index 0000000..43783bf
--- /dev/null
+++ b/test/e2e/plugin_cosched_combinations_test.go
@@ -0,0 +1,104 @@
+//go:build e2e
+// +build e2e
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"sigs.k8s.io/e2e-framework/klient/k8s"
+	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
+	"sigs.k8s.io/e2e-framework/klient/wait"
+	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
+	"sigs.k8s.io/e2e-framework/pkg/envconf"
+	"sigs.k8s.io/e2e-framework/pkg/features"
+)
+
+func TestMustStrictCoscheduling(t *testing.T) {
+	deploymentFeat := features.New("Test Must Strict Placement policy with Coscheduling plugins").
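+		// The feature runs in three stages: Setup deploys the Coscheduling
+		// scheduler configuration and the Must-Strict placement policy and
+		// creates a statefulset, Assess verifies where the pods landed, and
+		// Teardown deletes the statefulset again.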
+		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
+			wd, err := os.Getwd()
+			if err != nil {
+				t.Error(err)
+			}
+			pluginsResourceAbsolutePath, err := filepath.Abs(filepath.Join(wd, "plugins/coscheduling"))
+			if err != nil {
+				t.Error(err)
+			}
+			// deploy Coscheduling config
+			if err := KubectlApply(cfg.KubeconfigFile(), "kube-system", []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "coscheduling.yaml")}); err != nil {
+				t.Error("Failed to deploy config", err)
+			}
+
+			// deploy placement policy config
+			if err := deploySchedulerConfig(cfg.KubeconfigFile(), cfg.Namespace(), "examples", "v1alpha1_placementpolicy_strict_must.yml"); err != nil {
+				t.Error("Failed to deploy config", err)
+			}
+
+			podLabels := map[string]string{
+				"app":                              "nginx",
+				"pod-group.scheduling.sigs.k8s.io": "nginx",
+			}
+			// deploy a sample statefulset
+			statefulset := newStatefulSet(cfg.Namespace(), "statefulset-test", 6, podLabels)
+			if err := cfg.Client().Resources().Create(ctx, statefulset); err != nil {
+				t.Error("Failed to create statefulset", err)
+			}
+			// if err := KubectlApply(cfg.KubeconfigFile(), cfg.Namespace(), []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "cosched-deployment.yaml")}); err != nil {
+			// t.Error("Failed to deploy config", err)
+			// }
+			return ctx
+		}).
+		Assess("Pods successfully assigned to the right nodes with Must Strict and Coscheduling plugins option", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
+			client, err := cfg.NewClient()
+			if err != nil {
+				t.Error("Failed to create new client", err)
+			}
+			resultStatefulset := appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{Name: "statefulset-test", Namespace: cfg.Namespace()},
+			}
+
+			if err := wait.For(conditions.New(client.Resources()).ResourceMatch(&resultStatefulset, func(object k8s.Object) bool {
+				s := object.(*appsv1.StatefulSet)
+				return s.Status.ReadyReplicas == 3
+			}), wait.WithTimeout(time.Minute*2)); err != nil {
+				t.Error("Failed to deploy a statefulset", err)
+			}
+
+			// check if 4 pods out of 10 (40%) are running in the node with the same node selector
+			pods := &corev1.PodList{}
+			if err := wait.For(conditions.New(client.Resources()).ResourceListMatchN(pods, 4, func(object k8s.Object) bool {
+
+				if object.(*corev1.Pod).Spec.NodeName != "placement-policy-worker3" {
+					return true
+				}
+				return false
+			}, resources.WithLabelSelector(labels.FormatLabels(podSelectorLabels))),
+				wait.WithTimeout(time.Minute*4)); err != nil {
+				t.Error("number of pods assigned to nodes with the required nodeSelector do not match", err)
+			}
+
+			return context.WithValue(ctx, "statefulset-test", &resultStatefulset)
+		}).
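+		// Teardown fetches the statefulset that Assess stored in the context
+		// and deletes it, leaving the namespace clean for the next feature.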
+		Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
+			client, err := cfg.NewClient()
+			if err != nil {
+				t.Error("failed to create new Client", err)
+			}
+			dep := ctx.Value("statefulset-test").(*appsv1.StatefulSet)
+			if err := client.Resources().Delete(ctx, dep); err != nil {
+				t.Error("failed to delete Statefulset", err)
+			}
+			return ctx
+		}).Feature()
+	testenv.Test(t, deploymentFeat)
+}
diff --git a/test/e2e/plugin_noderesource_combinations_test.go b/test/e2e/plugin_noderesource_combinations_test.go
new file mode 100644
index 0000000..d14f002
--- /dev/null
+++ b/test/e2e/plugin_noderesource_combinations_test.go
@@ -0,0 +1,101 @@
+//go:build e2e
+// +build e2e
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"sigs.k8s.io/e2e-framework/klient/k8s"
+	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
+	"sigs.k8s.io/e2e-framework/klient/wait"
+	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
+	"sigs.k8s.io/e2e-framework/pkg/envconf"
+	"sigs.k8s.io/e2e-framework/pkg/features"
+)
+
+func TestMustStrictNoderesources(t *testing.T) {
+	deploymentFeat := features.New("Test Must Strict Placement policy with noderesources plugins").
+		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
+			wd, err := os.Getwd()
+			if err != nil {
+				t.Error(err)
+			}
+			pluginsResourceAbsolutePath, err := filepath.Abs(filepath.Join(wd, "plugins/noderesources"))
+			if err != nil {
+				t.Error(err)
+			}
+			// deploy noderesources config
+			if err := KubectlApply(cfg.KubeconfigFile(), "kube-system", []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "noderesources.yaml")}); err != nil {
+				t.Error("Failed to deploy config", err)
+			}
+
+			// deploy placement policy config
+			if err := deploySchedulerConfig(cfg.KubeconfigFile(), cfg.Namespace(), "examples", "v1alpha1_placementpolicy_strict_must.yml"); err != nil {
+				t.Error("Failed to deploy config", err)
+			}
+
+			podLabels := map[string]string{
+				"name": "test",
+			}
+			// deploy a sample deployment
+			deployment := newNodeResourceDeployment(cfg.Namespace(), "deployment-test", 1, podLabels)
+			if err := cfg.Client().Resources().Create(ctx, deployment); err != nil {
+				t.Error("Failed to create deployment", err)
+			}
+			// if err := KubectlApply(cfg.KubeconfigFile(), cfg.Namespace(), []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "noderesources-deployment.yaml")}); err != nil {
+			// t.Error("Failed to deploy config", err)
+			// }
+			return ctx
+		}).
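+		// Assess first waits for the deployment to report Available, then
+		// inspects which node each pod was scheduled to.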
+ Assess("Pods successfully assigned to the right nodes with Must Strict and noderesources plugins option", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Error("Failed to create new client", err) + } + resultDeployment := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-test", Namespace: cfg.Namespace()}, + } + + if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&resultDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue), + wait.WithTimeout(time.Minute*2)); err != nil { + t.Error("deployment not found", err) + } + + // check if 4 pods out of 10 (40%) are running in the node with the same node selector + pods := &corev1.PodList{} + if err := wait.For(conditions.New(client.Resources()).ResourceListMatchN(pods, 4, func(object k8s.Object) bool { + + if object.(*corev1.Pod).Spec.NodeName != "placement-policy-worker3" { + return true + } + return false + }, resources.WithLabelSelector(labels.FormatLabels(podSelectorLabels))), + wait.WithTimeout(time.Minute*4)); err != nil { + t.Error("number of pods assigned to nodes with the required nodeSelector do not match", err) + } + + return context.WithValue(ctx, "deployment-test", &resultDeployment) + }). + Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Error("failed to create new Client", err) + } + dep := ctx.Value("deployment-test").(*appsv1.Deployment) + if err := client.Resources().Delete(ctx, dep); err != nil { + t.Error("failed to delete deployment", err) + } + return ctx + }).Feature() + testenv.Test(t, deploymentFeat) +} diff --git a/test/e2e/plugins/coscheduling/cosched-deployment.yaml b/test/e2e/plugins/coscheduling/cosched-deployment.yaml new file mode 100644 index 0000000..01d28db --- /dev/null +++ b/test/e2e/plugins/coscheduling/cosched-deployment.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.sigs.k8s.io/v1alpha1 +kind: PodGroup +metadata: + name: nginx +spec: + scheduleTimeoutSeconds: 10 + minMember: 3 diff --git a/test/e2e/plugins/coscheduling/coscheduling.yaml b/test/e2e/plugins/coscheduling/coscheduling.yaml new file mode 100644 index 0000000..5e3dc6c --- /dev/null +++ b/test/e2e/plugins/coscheduling/coscheduling.yaml @@ -0,0 +1,224 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/scheduler-plugins/pull/50 # edited manually + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: podgroups.scheduling.sigs.k8s.io +spec: + group: scheduling.sigs.k8s.io + names: + kind: PodGroup + listKind: PodGroupList + plural: podgroups + singular: podgroup + shortNames: # edited manually + - pg # edited manually + - pgs # edited manually + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: PodGroup is a collection of Pod; used for batch workload. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of the desired behavior of the pod group. + properties: + minMember: + description: MinMember defines the minimal number of members/tasks + to run the pod group; if there's not enough resources to start all + tasks, the scheduler will not start anyone. + format: int32 + type: integer + minResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: MinResources defines the minimal resource of members/tasks + to run the pod group; if there's not enough resources to start all + tasks, the scheduler will not start anyone. + type: object + scheduleTimeoutSeconds: + description: ScheduleTimeoutSeconds defines the maximal time of members/tasks + to wait before run the pod group; + format: int32 + type: integer + type: object + status: + description: Status represents the current information about a pod group. + This data may not be up to date. + properties: + failed: + description: The number of pods which reached phase Failed. + format: int32 + type: integer + occupiedBy: + description: OccupiedBy marks the workload (e.g., deployment, statefulset) + UID that occupy the podgroup. It is empty if not initialized. + type: string + phase: + description: Current phase of PodGroup. + type: string + running: + description: The number of actively running pods. + format: int32 + type: integer + scheduleStartTime: + description: ScheduleStartTime of the group + format: date-time + type: string + scheduled: + description: The number of actively running pods. + format: int32 + type: integer + succeeded: + description: The number of pods which reached phase Succeeded. + format: int32 + type: integer + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: kubescheduler.config.k8s.io/v1beta2 +kind: KubeSchedulerConfiguration +leaderElection: + leaderElect: false +clientConnection: + kubeconfig: "REPLACE_ME_WITH_KUBE_CONFIG_PATH" +profiles: +- schedulerName: Cosched-sched + plugins: + queueSort: + enabled: + - name: Coscheduling + disabled: + - name: "*" + preFilter: + enabled: + - name: Coscheduling + postFilter: + enabled: + - name: Coscheduling + permit: + enabled: + - name: Coscheduling + reserve: + enabled: + - name: Coscheduling + postBind: + enabled: + - name: Coscheduling + pluginConfig: + - name: Coscheduling + args: + permitWaitingTimeSeconds: 10 + deniedPGExpirationTimeSeconds: 3 +--- +#First part +# Apply extra privileges to system:kube-scheduler. 
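+# The Coscheduling plugin runs inside kube-scheduler, so the
+# system:kube-scheduler user also needs permission to manage the
+# PodGroup and ElasticQuota custom resources that the plugins use.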
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-scheduler:plugins +rules: +- apiGroups: ["scheduling.sigs.k8s.io"] + resources: ["podgroups", "elasticquotas"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-scheduler:plugins +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-scheduler:plugins +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:kube-scheduler +--- +# Second part +# Install the controller image. +apiVersion: v1 +kind: Namespace +metadata: + name: scheduler-plugins +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scheduler-plugins-controller + namespace: scheduler-plugins +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: scheduler-plugins-controller +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +- apiGroups: ["scheduling.sigs.k8s.io"] + resources: ["podgroups", "elasticquotas"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: scheduler-plugins-controller +subjects: +- kind: ServiceAccount + name: scheduler-plugins-controller + namespace: scheduler-plugins +roleRef: + kind: ClusterRole + name: scheduler-plugins-controller + apiGroup: rbac.authorization.k8s.io +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: scheduler-plugins-controller + namespace: scheduler-plugins + labels: + app: scheduler-plugins-controller +spec: + replicas: 1 + selector: + matchLabels: + app: scheduler-plugins-controller + template: + metadata: + labels: + app: scheduler-plugins-controller + spec: + serviceAccount: scheduler-plugins-controller + containers: + - name: scheduler-plugins-controller + image: k8s.gcr.io/scheduler-plugins/controller:v0.20.10 + imagePullPolicy: IfNotPresent diff --git a/test/e2e/plugins/noderesources/noderesources-deployment.yaml b/test/e2e/plugins/noderesources/noderesources-deployment.yaml new file mode 100644 index 0000000..2c796c4 --- /dev/null +++ b/test/e2e/plugins/noderesources/noderesources-deployment.yaml @@ -0,0 +1,95 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deployment +spec: + selector: + matchLabels: + name: test + template: + metadata: + labels: + name: test + spec: + schedulerName: topo-aware-scheduler + containers: + - name: test-deployment-1-container-1 + image: quay.io/fromani/numalign + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: [ "while true; do numalign; sleep 100000; done;" ] + resources: + limits: + cpu: 1 + memory: 0 + example.com/deviceA: 1 + example.com/deviceB: 1 + requests: + cpu: 1 + memory: 0 + example.com/deviceA: 1 + example.com/deviceB: 1 +--- +--- +apiVersion: topology.node.k8s.io/v1alpha1 +kind: NodeResourceTopology +metadata: + name: worker-node-a +topologyPolicies: ["SingleNUMANodeContainerLevel"] +zones: + - name: node-0 + type: Node + resources: + - name: cpu + capacity: 4 + allocatable: 3 + - name: example.com/deviceA + capacity: 1 + allocatable: 1 + - name: example.com/deviceB + capacity: 2 + allocatable: 2 + - name: node-1 + type: Node + resources: + - name: cpu + capacity: 4 + allocatable: 3 + - name: example.com/deviceA + capacity: 2 + allocatable: 2 + - name: example.com/deviceB + capacity: 1 + allocatable: 1 +--- +--- +apiVersion: 
topology.node.k8s.io/v1alpha1 +kind: NodeResourceTopology +metadata: + name: worker-node-a +topologyPolicies: ["SingleNUMANodeContainerLevel"] +zones: + - name: node-0 + type: Node + resources: + - name: cpu + capacity: 4 + allocatable: 3 + - name: example.com/deviceA + capacity: 1 + allocatable: 1 + - name: example.com/deviceB + capacity: 2 + allocatable: 2 + - name: node-1 + type: Node + resources: + - name: cpu + capacity: 4 + allocatable: 3 + - name: example.com/deviceA + capacity: 2 + allocatable: 2 + - name: example.com/deviceB + capacity: 1 + allocatable: 1 diff --git a/test/e2e/plugins/noderesources/noderesources.yaml b/test/e2e/plugins/noderesources/noderesources.yaml new file mode 100644 index 0000000..f7295b4 --- /dev/null +++ b/test/e2e/plugins/noderesources/noderesources.yaml @@ -0,0 +1,166 @@ +--- +apiVersion: kubescheduler.config.k8s.io/v1beta2 +kind: KubeSchedulerConfiguration +leaderElection: + leaderElect: false +clientConnection: + kubeconfig: "/etc/kubernetes/scheduler.conf" +profiles: + - schedulerName: topo-aware-scheduler + plugins: + filter: + enabled: + - name: NodeResourceTopologyMatch + score: + enabled: + - name: NodeResourceTopologyMatch + # optional plugin configs + pluginConfig: + - name: NodeResourceTopologyMatch + args: + scoringStrategy: + type: "LeastAllocated" +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes/enhancements/pull/1870 # edited manually + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: noderesourcetopologies.topology.node.k8s.io +spec: + group: topology.node.k8s.io + names: + kind: NodeResourceTopology + listKind: NodeResourceTopologyList + plural: noderesourcetopologies + shortNames: + - node-res-topo + singular: noderesourcetopology + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NodeResourceTopology describes node resources and their topology. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + topologyPolicies: + items: + type: string + type: array + zones: + description: ZoneList contains an array of Zone objects. + items: + description: Zone represents a resource topology zone, e.g. socket, + node, die or core. + properties: + attributes: + description: AttributeList contains an array of AttributeInfo objects. + items: + description: AttributeInfo contains one attribute of a Zone. + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + costs: + description: CostList contains an array of CostInfo objects. + items: + description: CostInfo describes the cost (or distance) between + two Zones. 
+ properties: + name: + type: string + value: + format: int64 + type: integer + required: + - name + - value + type: object + type: array + name: + type: string + parent: + type: string + resources: + description: ResourceInfoList contains an array of ResourceInfo + objects. + items: + description: ResourceInfo contains information about one resource + type. + properties: + allocatable: + anyOf: + - type: integer + - type: string + description: Allocatable quantity of the resource, corresponding + to allocatable in node status, i.e. total amount of this + resource available to be used by pods. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + available: + anyOf: + - type: integer + - type: string + description: Available is the amount of this resource currently + available for new (to be scheduled) pods, i.e. Allocatable + minus the resources reserved by currently running pods. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + capacity: + anyOf: + - type: integer + - type: string + description: Capacity of the resource, corresponding to capacity + in node status, i.e. total amount of this resource that + the node has. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + name: + description: Name of the resource. + type: string + required: + - allocatable + - available + - capacity + - name + type: object + type: array + type: + type: string + required: + - name + - type + type: object + type: array + required: + - topologyPolicies + - zones + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/test/e2e/utils.go b/test/e2e/utils.go index ba60f18..e285a1c 100644 --- a/test/e2e/utils.go +++ b/test/e2e/utils.go @@ -6,6 +6,8 @@ package e2e import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) @@ -44,7 +46,60 @@ func newDeployment(namespace, name string, replicas int32, labels map[string]str } } +func newNodeResourceDeployment(namespace, name string, replicas int32, labels map[string]string) *appsv1.Deployment { + resList := map[v1.ResourceName]string{ + v1.ResourceCPU: "1", + v1.ResourceMemory: "0", + "example.com/deviceA": "1", + "example.com/deviceB": "1", + } + res := v1.ResourceList{} + for k, v := range resList { + res[k] = resource.MustParse(v) + } + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: labels}, + Spec: corev1.PodSpec{ + SchedulerName: schedulerName, + Containers: []corev1.Container{ + { + Name: "test-deployment", + Image: e2epod.GetDefaultTestImage(), + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/sleep", "10000"}, + Resources: corev1.ResourceRequirements{ + Limits: res, + Requests: res, + }, + }, + }, + }, + }, + }, + } +} + func newStatefulSet(namespace, name string, replicas int32, labels 
map[string]string) *appsv1.StatefulSet {
+	resList := map[v1.ResourceName]string{
+		v1.ResourceCPU:    "3000m",
+		v1.ResourceMemory: "300Mi",
+	}
+	res := v1.ResourceList{}
+	for k, v := range resList {
+		res[k] = resource.MustParse(v)
+	}
+
 	return &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
@@ -66,6 +121,10 @@ func newStatefulSet(namespace, name string, replicas int32, labels map[string]st
 						Image:           e2epod.GetDefaultTestImage(),
 						ImagePullPolicy: corev1.PullIfNotPresent,
 						Command:         []string{"/bin/sleep", "10000"},
+						Resources: corev1.ResourceRequirements{
+							Limits:   res,
+							Requests: res,
+						},
 					},
 				},
 			},

From 97150533db6307dbdc831313f7fb645fe3a8168a Mon Sep 17 00:00:00 2001
From: Heba Elayoty
Date: Mon, 10 Jan 2022 15:00:27 -0800
Subject: [PATCH 2/3] update expected results

---
 test/e2e/plugin_cosched_combinations_test.go  | 22 ++++++++---------
 .../plugin_noderesource_combinations_test.go  | 24 ++++++++-----------
 .../noderesources-deployment.yaml             | 14 +++++------
 3 files changed, 26 insertions(+), 34 deletions(-)

diff --git a/test/e2e/plugin_cosched_combinations_test.go b/test/e2e/plugin_cosched_combinations_test.go
index 43783bf..547c67f 100644
--- a/test/e2e/plugin_cosched_combinations_test.go
+++ b/test/e2e/plugin_cosched_combinations_test.go
@@ -53,9 +53,7 @@ func TestMustStrictCoscheduling(t *testing.T) {
 			if err := cfg.Client().Resources().Create(ctx, statefulset); err != nil {
 				t.Error("Failed to create statefulset", err)
 			}
-			// if err := KubectlApply(cfg.KubeconfigFile(), cfg.Namespace(), []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "cosched-deployment.yaml")}); err != nil {
-			// t.Error("Failed to deploy config", err)
-			// }
+
 			return ctx
 		}).
 		Assess("Pods successfully assigned to the right nodes with Must Strict and Coscheduling plugins option", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
@@ -74,17 +72,17 @@ func TestMustStrictCoscheduling(t *testing.T) {
 				t.Error("Failed to deploy a statefulset", err)
 			}
 
-			// check if 4 pods out of 10 (40%) are running in the node with the same node selector
-			pods := &corev1.PodList{}
-			if err := wait.For(conditions.New(client.Resources()).ResourceListMatchN(pods, 4, func(object k8s.Object) bool {
+			var pods corev1.PodList
+			if err := client.Resources().List(ctx, &pods, resources.WithLabelSelector(labels.FormatLabels(map[string]string{"app": "nginx", "pod-group.scheduling.sigs.k8s.io": "nginx"}))); err != nil {
+				t.Error("cannot get list of pods", err)
+			}
 
-			if object.(*corev1.Pod).Spec.NodeName != "placement-policy-worker3" {
-				return true
+			for i := range pods.Items {
+				if pods.Items[i].Spec.NodeName != "placement-policy-worker3" {
+					continue
+				} else {
+					t.Errorf("pod %s assigned to the wrong node", pods.Items[i].Name)
 				}
-				return false
-			}, resources.WithLabelSelector(labels.FormatLabels(podSelectorLabels))),
-				wait.WithTimeout(time.Minute*4)); err != nil {
-				t.Error("number of pods assigned to nodes with the required nodeSelector do not match", err)
 			}
 
 			return context.WithValue(ctx, "statefulset-test", &resultStatefulset)
diff --git a/test/e2e/plugin_noderesource_combinations_test.go b/test/e2e/plugin_noderesource_combinations_test.go
index d14f002..e95eed8 100644
--- a/test/e2e/plugin_noderesource_combinations_test.go
+++ b/test/e2e/plugin_noderesource_combinations_test.go
@@ -15,7 +15,6 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"sigs.k8s.io/e2e-framework/klient/k8s"
 	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
"sigs.k8s.io/e2e-framework/klient/wait" "sigs.k8s.io/e2e-framework/klient/wait/conditions" @@ -52,9 +51,7 @@ func TestMustStrictNoderesources(t *testing.T) { if err := cfg.Client().Resources().Create(ctx, deployment); err != nil { t.Error("Failed to create deployment", err) } - // if err := KubectlApply(cfg.KubeconfigFile(), cfg.Namespace(), []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "noderesources-deployment.yaml")}); err != nil { - // t.Error("Failed to deploy config", err) - // } + return ctx }). Assess("Pods successfully assigned to the right nodes with Must Strict and noderesources plugins option", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { @@ -71,19 +68,18 @@ func TestMustStrictNoderesources(t *testing.T) { t.Error("deployment not found", err) } - // check if 4 pods out of 10 (40%) are running in the node with the same node selector - pods := &corev1.PodList{} - if err := wait.For(conditions.New(client.Resources()).ResourceListMatchN(pods, 4, func(object k8s.Object) bool { + var pods corev1.PodList + if err := client.Resources().List(ctx, &pods, resources.WithLabelSelector(labels.FormatLabels(map[string]string{"name": "test"}))); err != nil { + t.Error("cannot get list of pods", err) + } - if object.(*corev1.Pod).Spec.NodeName != "placement-policy-worker3" { - return true + for i := range pods.Items { + if pods.Items[i].Spec.NodeName != "placement-policy-worker3" { + continue + } else { + t.Error("pods assigned to the wrong node", err) } - return false - }, resources.WithLabelSelector(labels.FormatLabels(podSelectorLabels))), - wait.WithTimeout(time.Minute*4)); err != nil { - t.Error("number of pods assigned to nodes with the required nodeSelector do not match", err) } - return context.WithValue(ctx, "deployment-test", &resultDeployment) }). 
Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { diff --git a/test/e2e/plugins/noderesources/noderesources-deployment.yaml b/test/e2e/plugins/noderesources/noderesources-deployment.yaml index 2c796c4..357e59c 100644 --- a/test/e2e/plugins/noderesources/noderesources-deployment.yaml +++ b/test/e2e/plugins/noderesources/noderesources-deployment.yaml @@ -30,14 +30,13 @@ spec: example.com/deviceA: 1 example.com/deviceB: 1 --- ---- apiVersion: topology.node.k8s.io/v1alpha1 kind: NodeResourceTopology metadata: - name: worker-node-a + name: worker1 topologyPolicies: ["SingleNUMANodeContainerLevel"] zones: - - name: node-0 + - name: placement-policy-worker1 type: Node resources: - name: cpu @@ -49,7 +48,7 @@ zones: - name: example.com/deviceB capacity: 2 allocatable: 2 - - name: node-1 + - name: placement-policy-worker2 type: Node resources: - name: cpu @@ -62,14 +61,13 @@ zones: capacity: 1 allocatable: 1 --- ---- apiVersion: topology.node.k8s.io/v1alpha1 kind: NodeResourceTopology metadata: - name: worker-node-a + name: worker2 topologyPolicies: ["SingleNUMANodeContainerLevel"] zones: - - name: node-0 + - name: placement-policy-worker1 type: Node resources: - name: cpu @@ -81,7 +79,7 @@ zones: - name: example.com/deviceB capacity: 2 allocatable: 2 - - name: node-1 + - name: placement-policy-worker2 type: Node resources: - name: cpu From 019c7a2b283aa416fc4ab883a000f3dbcd95c935 Mon Sep 17 00:00:00 2001 From: Heba Elayoty Date: Mon, 10 Jan 2022 17:11:19 -0800 Subject: [PATCH 3/3] add deployment for coscheduling --- .github/workflows/workflow.yml | 2 +- test/e2e/plugin_cosched_combinations_test.go | 24 +-- .../plugin_noderesource_combinations_test.go | 97 ---------- test/e2e/plugins/coscheduling/configmap.yaml | 36 ++++ .../{cosched-deployment.yaml => cosched.yaml} | 0 test/e2e/plugins/coscheduling/deployment.yaml | 74 ++++++++ test/e2e/plugins/coscheduling/rbac.yaml | 100 +++++++++++ ... 
=> scheduling.sigs.k8s.io_podgroups.yaml} | 122 +------------
 .../plugins/coscheduling/serviceaccount.yaml  |  16 ++
 .../noderesources-deployment.yaml             |  93 ----------
 .../plugins/noderesources/noderesources.yaml  | 166 ------------------
 11 files changed, 242 insertions(+), 488 deletions(-)
 delete mode 100644 test/e2e/plugin_noderesource_combinations_test.go
 create mode 100644 test/e2e/plugins/coscheduling/configmap.yaml
 rename test/e2e/plugins/coscheduling/{cosched-deployment.yaml => cosched.yaml} (100%)
 create mode 100644 test/e2e/plugins/coscheduling/deployment.yaml
 create mode 100644 test/e2e/plugins/coscheduling/rbac.yaml
 rename test/e2e/plugins/coscheduling/{coscheduling.yaml => scheduling.sigs.k8s.io_podgroups.yaml} (60%)
 create mode 100644 test/e2e/plugins/coscheduling/serviceaccount.yaml
 delete mode 100644 test/e2e/plugins/noderesources/noderesources-deployment.yaml
 delete mode 100644 test/e2e/plugins/noderesources/noderesources.yaml

diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml
index a1b065c..fffeddd 100644
--- a/.github/workflows/workflow.yml
+++ b/.github/workflows/workflow.yml
@@ -50,7 +50,7 @@ jobs:
   e2e-test:
     name: "E2E Test"
     runs-on: ubuntu-latest
-    timeout-minutes: 10
+    timeout-minutes: 15
     steps:
     - name: Set up Go 1.17
       uses: actions/setup-go@v2
diff --git a/test/e2e/plugin_cosched_combinations_test.go b/test/e2e/plugin_cosched_combinations_test.go
index 547c67f..68d1046 100644
--- a/test/e2e/plugin_cosched_combinations_test.go
+++ b/test/e2e/plugin_cosched_combinations_test.go
@@ -26,6 +26,16 @@ import (
 func TestMustStrictCoscheduling(t *testing.T) {
 	deploymentFeat := features.New("Test Must Strict Placement policy with Coscheduling plugins").
 		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
+			// deploy placement policy config
+			if err := deploySchedulerConfig(cfg.KubeconfigFile(), cfg.Namespace(), "examples", "v1alpha1_placementpolicy_strict_must.yml"); err != nil {
+				t.Error("Failed to deploy placement policy config", err)
+			}
+
+			podLabels := map[string]string{
+				"app":                              "nginx",
+				"pod-group.scheduling.sigs.k8s.io": "nginx",
+			}
+
 			wd, err := os.Getwd()
 			if err != nil {
 				t.Error(err)
 			}
@@ -34,20 +44,12 @@ func TestMustStrictCoscheduling(t *testing.T) {
 			if err != nil {
 				t.Error(err)
 			}
-			// deploy Coscheduling config
-			if err := KubectlApply(cfg.KubeconfigFile(), "kube-system", []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "coscheduling.yaml")}); err != nil {
-				t.Error("Failed to deploy config", err)
-			}
 
-			// deploy placement policy config
-			if err := deploySchedulerConfig(cfg.KubeconfigFile(), cfg.Namespace(), "examples", "v1alpha1_placementpolicy_strict_must.yml"); err != nil {
-				t.Error("Failed to deploy config", err)
+			// deploy Coscheduling config
+			if err := KubectlApply(cfg.KubeconfigFile(), "scheduler-plugins", []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "")}); err != nil {
+				t.Error("Failed to deploy coscheduling config", err)
 			}
 
-			podLabels := map[string]string{
-				"app":                              "nginx",
-				"pod-group.scheduling.sigs.k8s.io": "nginx",
-			}
+
 			// deploy a sample statefulset
 			statefulset := newStatefulSet(cfg.Namespace(), "statefulset-test", 6, podLabels)
 			if err := cfg.Client().Resources().Create(ctx, statefulset); err != nil {
 				t.Error("Failed to create statefulset", err)
diff --git a/test/e2e/plugin_noderesource_combinations_test.go b/test/e2e/plugin_noderesource_combinations_test.go
deleted file mode 100644
index e95eed8..0000000
--- a/test/e2e/plugin_noderesource_combinations_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-//go:build e2e
-// +build e2e
-
-package e2e
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
-	"sigs.k8s.io/e2e-framework/klient/wait"
-	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
-	"sigs.k8s.io/e2e-framework/pkg/envconf"
-	"sigs.k8s.io/e2e-framework/pkg/features"
-)
-
-func TestMustStrictNoderesources(t *testing.T) {
-	deploymentFeat := features.New("Test Must Strict Placement policy with noderesources plugins").
-		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
-			wd, err := os.Getwd()
-			if err != nil {
-				t.Error(err)
-			}
-			pluginsResourceAbsolutePath, err := filepath.Abs(filepath.Join(wd, "plugins/noderesources"))
-			if err != nil {
-				t.Error(err)
-			}
-			// deploy noderesources config
-			if err := KubectlApply(cfg.KubeconfigFile(), "kube-system", []string{"-f", fmt.Sprintf("%s/%s", pluginsResourceAbsolutePath, "noderesources.yaml")}); err != nil {
-				t.Error("Failed to deploy config", err)
-			}
-
-			// deploy placement policy config
-			if err := deploySchedulerConfig(cfg.KubeconfigFile(), cfg.Namespace(), "examples", "v1alpha1_placementpolicy_strict_must.yml"); err != nil {
-				t.Error("Failed to deploy config", err)
-			}
-
-			podLabels := map[string]string{
-				"name": "test",
-			}
-			// deploy a sample deployment
-			deployment := newNodeResourceDeployment(cfg.Namespace(), "deployment-test", 1, podLabels)
-			if err := cfg.Client().Resources().Create(ctx, deployment); err != nil {
-				t.Error("Failed to create deployment", err)
-			}
-
-			return ctx
-		}).
-		// Assess first waits for the deployment to report Available, then
-		// inspects which node each pod was scheduled to.
-		Assess("Pods successfully assigned to the right nodes with Must Strict and noderesources plugins option", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
-			client, err := cfg.NewClient()
-			if err != nil {
-				t.Error("Failed to create new client", err)
-			}
-			resultDeployment := appsv1.Deployment{
-				ObjectMeta: metav1.ObjectMeta{Name: "deployment-test", Namespace: cfg.Namespace()},
-			}
-
-			if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&resultDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue),
-				wait.WithTimeout(time.Minute*2)); err != nil {
-				t.Error("deployment not found", err)
-			}
-
-			var pods corev1.PodList
-			if err := client.Resources().List(ctx, &pods, resources.WithLabelSelector(labels.FormatLabels(map[string]string{"name": "test"}))); err != nil {
-				t.Error("cannot get list of pods", err)
-			}
-
-			for i := range pods.Items {
-				if pods.Items[i].Spec.NodeName != "placement-policy-worker3" {
-					continue
-				} else {
-					t.Errorf("pod %s assigned to the wrong node", pods.Items[i].Name)
-				}
-			}
-			return context.WithValue(ctx, "deployment-test", &resultDeployment)
-		}).
- Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - client, err := cfg.NewClient() - if err != nil { - t.Error("failed to create new Client", err) - } - dep := ctx.Value("deployment-test").(*appsv1.Deployment) - if err := client.Resources().Delete(ctx, dep); err != nil { - t.Error("failed to delete deployment", err) - } - return ctx - }).Feature() - testenv.Test(t, deploymentFeat) -} diff --git a/test/e2e/plugins/coscheduling/configmap.yaml b/test/e2e/plugins/coscheduling/configmap.yaml new file mode 100644 index 0000000..cc10259 --- /dev/null +++ b/test/e2e/plugins/coscheduling/configmap.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: scheduler-config + namespace: scheduler-plugins +data: + scheduler-config.yaml: | + apiVersion: kubescheduler.config.k8s.io/v1beta1 + kind: KubeSchedulerConfiguration + leaderElection: + leaderElect: false + profiles: + - schedulerName: scheduler-plugins-scheduler + plugins: + queueSort: + enabled: + - name: Coscheduling + disabled: + - name: "*" + preFilter: + enabled: + - name: Coscheduling + permit: + enabled: + - name: Coscheduling + reserve: + enabled: + - name: Coscheduling + postBind: + enabled: + - name: Coscheduling + pluginConfig: + - name: Coscheduling + args: + permitWaitingTimeSeconds: 10 + deniedPGExpirationTimeSeconds: 3 diff --git a/test/e2e/plugins/coscheduling/cosched-deployment.yaml b/test/e2e/plugins/coscheduling/cosched.yaml similarity index 100% rename from test/e2e/plugins/coscheduling/cosched-deployment.yaml rename to test/e2e/plugins/coscheduling/cosched.yaml diff --git a/test/e2e/plugins/coscheduling/deployment.yaml b/test/e2e/plugins/coscheduling/deployment.yaml new file mode 100644 index 0000000..91b7ef6 --- /dev/null +++ b/test/e2e/plugins/coscheduling/deployment.yaml @@ -0,0 +1,74 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: scheduler-plugins-controller + namespace: scheduler-plugins + labels: + app: scheduler-plugins-controller +spec: + replicas: 1 + selector: + matchLabels: + app: scheduler-plugins-controller + template: + metadata: + labels: + app: scheduler-plugins-controller + spec: + serviceAccount: scheduler-plugins-controller + containers: + - name: scheduler-plugins-controller + image: k8s.gcr.io/scheduler-plugins/controller:v0.20.10 + imagePullPolicy: IfNotPresent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + component: scheduler + name: scheduler-plugins-scheduler + namespace: scheduler-plugins +spec: + selector: + matchLabels: + component: scheduler + replicas: 1 + template: + metadata: + labels: + component: scheduler + spec: + serviceAccountName: scheduler-plugins-scheduler + containers: + - command: + - /bin/kube-scheduler + - --address=0.0.0.0 + - --leader-elect=false + - --config=/etc/kubernetes/scheduler-config.yaml + - --scheduler-name=scheduler-plugins-scheduler + image: k8s.gcr.io/scheduler-plugins/kube-scheduler:v0.20.10 + livenessProbe: + httpGet: + path: /healthz + port: 10251 + initialDelaySeconds: 15 + name: scheduler-plugins-scheduler + readinessProbe: + httpGet: + path: /healthz + port: 10251 + resources: + requests: + cpu: '0.1' + securityContext: + privileged: false + volumeMounts: + - name: scheduler-config + mountPath: /etc/kubernetes + readOnly: true + hostNetwork: false + hostPID: false + volumes: + - name: scheduler-config + configMap: + name: scheduler-config diff --git a/test/e2e/plugins/coscheduling/rbac.yaml b/test/e2e/plugins/coscheduling/rbac.yaml new file mode 100644 index 
0000000..e8bf69e --- /dev/null +++ b/test/e2e/plugins/coscheduling/rbac.yaml @@ -0,0 +1,100 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scheduler-plugins-scheduler +rules: +- apiGroups: ["", "events.k8s.io"] + resources: ["events"] + verbs: ["create", "patch", "update"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +- apiGroups: ["coordination.k8s.io"] + resourceNames: ["kube-scheduler"] + resources: ["leases"] + verbs: ["get", "update"] +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["create"] +- apiGroups: [""] + resourceNames: ["kube-scheduler"] + resources: ["endpoints"] + verbs: ["get", "update"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["delete", "get", "list", "watch"] +- apiGroups: [""] + resources: ["bindings", "pods/binding"] + verbs: ["create"] +- apiGroups: [""] + resources: ["pods/status"] + verbs: ["patch", "update"] +- apiGroups: [""] + resources: ["replicationcontrollers", "services"] + verbs: ["get", "list", "watch"] +- apiGroups: ["apps", "extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +- apiGroups: ["apps"] + resources: ["statefulsets"] + verbs: ["get", "list", "watch"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["persistentvolumeclaims", "persistentvolumes"] + verbs: ["get", "list", "watch", "patch", "update"] +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +- apiGroups: ["storage.k8s.io"] + resources: ["csinodes", "storageclasses"] + verbs: ["get", "list", "watch"] +- apiGroups: ["scheduling.sigs.k8s.io"] + resources: ["podgroups", "elasticquotas"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: scheduler-plugins-scheduler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: scheduler-plugins-scheduler +subjects: +- kind: ServiceAccount + name: scheduler-plugins-scheduler + namespace: scheduler-plugins +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: scheduler-plugins-controller +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] +- apiGroups: ["scheduling.sigs.k8s.io"] + resources: ["podgroups", "elasticquotas"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: scheduler-plugins-controller +subjects: +- kind: ServiceAccount + name: scheduler-plugins-controller + namespace: scheduler-plugins +roleRef: + kind: ClusterRole + name: scheduler-plugins-controller + apiGroup: rbac.authorization.k8s.io + diff --git a/test/e2e/plugins/coscheduling/coscheduling.yaml b/test/e2e/plugins/coscheduling/scheduling.sigs.k8s.io_podgroups.yaml similarity index 60% rename from test/e2e/plugins/coscheduling/coscheduling.yaml rename to test/e2e/plugins/coscheduling/scheduling.sigs.k8s.io_podgroups.yaml index 5e3dc6c..0ef92c7 100644 --- a/test/e2e/plugins/coscheduling/coscheduling.yaml +++ b/test/e2e/plugins/coscheduling/scheduling.sigs.k8s.io_podgroups.yaml @@ -1,3 +1,5 @@ + +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ 
-102,123 +104,3 @@ status: plural: "" conditions: [] storedVersions: [] ---- -apiVersion: kubescheduler.config.k8s.io/v1beta2 -kind: KubeSchedulerConfiguration -leaderElection: - leaderElect: false -clientConnection: - kubeconfig: "REPLACE_ME_WITH_KUBE_CONFIG_PATH" -profiles: -- schedulerName: Cosched-sched - plugins: - queueSort: - enabled: - - name: Coscheduling - disabled: - - name: "*" - preFilter: - enabled: - - name: Coscheduling - postFilter: - enabled: - - name: Coscheduling - permit: - enabled: - - name: Coscheduling - reserve: - enabled: - - name: Coscheduling - postBind: - enabled: - - name: Coscheduling - pluginConfig: - - name: Coscheduling - args: - permitWaitingTimeSeconds: 10 - deniedPGExpirationTimeSeconds: 3 ---- -#First part -# Apply extra privileges to system:kube-scheduler. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:kube-scheduler:plugins -rules: -- apiGroups: ["scheduling.sigs.k8s.io"] - resources: ["podgroups", "elasticquotas"] - verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:kube-scheduler:plugins -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:kube-scheduler:plugins -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: User - name: system:kube-scheduler ---- -# Second part -# Install the controller image. -apiVersion: v1 -kind: Namespace -metadata: - name: scheduler-plugins ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: scheduler-plugins-controller - namespace: scheduler-plugins ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: scheduler-plugins-controller -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] -- apiGroups: ["scheduling.sigs.k8s.io"] - resources: ["podgroups", "elasticquotas"] - verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: scheduler-plugins-controller -subjects: -- kind: ServiceAccount - name: scheduler-plugins-controller - namespace: scheduler-plugins -roleRef: - kind: ClusterRole - name: scheduler-plugins-controller - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: scheduler-plugins-controller - namespace: scheduler-plugins - labels: - app: scheduler-plugins-controller -spec: - replicas: 1 - selector: - matchLabels: - app: scheduler-plugins-controller - template: - metadata: - labels: - app: scheduler-plugins-controller - spec: - serviceAccount: scheduler-plugins-controller - containers: - - name: scheduler-plugins-controller - image: k8s.gcr.io/scheduler-plugins/controller:v0.20.10 - imagePullPolicy: IfNotPresent diff --git a/test/e2e/plugins/coscheduling/serviceaccount.yaml b/test/e2e/plugins/coscheduling/serviceaccount.yaml new file mode 100644 index 0000000..1c89890 --- /dev/null +++ b/test/e2e/plugins/coscheduling/serviceaccount.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: scheduler-plugins +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scheduler-plugins-scheduler + namespace: scheduler-plugins +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scheduler-plugins-controller + namespace: scheduler-plugins diff --git a/test/e2e/plugins/noderesources/noderesources-deployment.yaml b/test/e2e/plugins/noderesources/noderesources-deployment.yaml 
deleted file mode 100644 index 357e59c..0000000 --- a/test/e2e/plugins/noderesources/noderesources-deployment.yaml +++ /dev/null @@ -1,93 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test-deployment -spec: - selector: - matchLabels: - name: test - template: - metadata: - labels: - name: test - spec: - schedulerName: topo-aware-scheduler - containers: - - name: test-deployment-1-container-1 - image: quay.io/fromani/numalign - imagePullPolicy: IfNotPresent - command: ["/bin/sh", "-c"] - args: [ "while true; do numalign; sleep 100000; done;" ] - resources: - limits: - cpu: 1 - memory: 0 - example.com/deviceA: 1 - example.com/deviceB: 1 - requests: - cpu: 1 - memory: 0 - example.com/deviceA: 1 - example.com/deviceB: 1 ---- -apiVersion: topology.node.k8s.io/v1alpha1 -kind: NodeResourceTopology -metadata: - name: worker1 -topologyPolicies: ["SingleNUMANodeContainerLevel"] -zones: - - name: placement-policy-worker1 - type: Node - resources: - - name: cpu - capacity: 4 - allocatable: 3 - - name: example.com/deviceA - capacity: 1 - allocatable: 1 - - name: example.com/deviceB - capacity: 2 - allocatable: 2 - - name: placement-policy-worker2 - type: Node - resources: - - name: cpu - capacity: 4 - allocatable: 3 - - name: example.com/deviceA - capacity: 2 - allocatable: 2 - - name: example.com/deviceB - capacity: 1 - allocatable: 1 ---- -apiVersion: topology.node.k8s.io/v1alpha1 -kind: NodeResourceTopology -metadata: - name: worker2 -topologyPolicies: ["SingleNUMANodeContainerLevel"] -zones: - - name: placement-policy-worker1 - type: Node - resources: - - name: cpu - capacity: 4 - allocatable: 3 - - name: example.com/deviceA - capacity: 1 - allocatable: 1 - - name: example.com/deviceB - capacity: 2 - allocatable: 2 - - name: placement-policy-worker2 - type: Node - resources: - - name: cpu - capacity: 4 - allocatable: 3 - - name: example.com/deviceA - capacity: 2 - allocatable: 2 - - name: example.com/deviceB - capacity: 1 - allocatable: 1 diff --git a/test/e2e/plugins/noderesources/noderesources.yaml b/test/e2e/plugins/noderesources/noderesources.yaml deleted file mode 100644 index f7295b4..0000000 --- a/test/e2e/plugins/noderesources/noderesources.yaml +++ /dev/null @@ -1,166 +0,0 @@ ---- -apiVersion: kubescheduler.config.k8s.io/v1beta2 -kind: KubeSchedulerConfiguration -leaderElection: - leaderElect: false -clientConnection: - kubeconfig: "/etc/kubernetes/scheduler.conf" -profiles: - - schedulerName: topo-aware-scheduler - plugins: - filter: - enabled: - - name: NodeResourceTopologyMatch - score: - enabled: - - name: NodeResourceTopologyMatch - # optional plugin configs - pluginConfig: - - name: NodeResourceTopologyMatch - args: - scoringStrategy: - type: "LeastAllocated" ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.kubernetes.io: https://github.com/kubernetes/enhancements/pull/1870 # edited manually - controller-gen.kubebuilder.io/version: v0.6.2 - creationTimestamp: null - name: noderesourcetopologies.topology.node.k8s.io -spec: - group: topology.node.k8s.io - names: - kind: NodeResourceTopology - listKind: NodeResourceTopologyList - plural: noderesourcetopologies - shortNames: - - node-res-topo - singular: noderesourcetopology - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: NodeResourceTopology describes node resources and their topology. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - topologyPolicies: - items: - type: string - type: array - zones: - description: ZoneList contains an array of Zone objects. - items: - description: Zone represents a resource topology zone, e.g. socket, - node, die or core. - properties: - attributes: - description: AttributeList contains an array of AttributeInfo objects. - items: - description: AttributeInfo contains one attribute of a Zone. - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - costs: - description: CostList contains an array of CostInfo objects. - items: - description: CostInfo describes the cost (or distance) between - two Zones. - properties: - name: - type: string - value: - format: int64 - type: integer - required: - - name - - value - type: object - type: array - name: - type: string - parent: - type: string - resources: - description: ResourceInfoList contains an array of ResourceInfo - objects. - items: - description: ResourceInfo contains information about one resource - type. - properties: - allocatable: - anyOf: - - type: integer - - type: string - description: Allocatable quantity of the resource, corresponding - to allocatable in node status, i.e. total amount of this - resource available to be used by pods. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - available: - anyOf: - - type: integer - - type: string - description: Available is the amount of this resource currently - available for new (to be scheduled) pods, i.e. Allocatable - minus the resources reserved by currently running pods. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - capacity: - anyOf: - - type: integer - - type: string - description: Capacity of the resource, corresponding to capacity - in node status, i.e. total amount of this resource that - the node has. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - name: - description: Name of the resource. - type: string - required: - - allocatable - - available - - capacity - - name - type: object - type: array - type: - type: string - required: - - name - - type - type: object - type: array - required: - - topologyPolicies - - zones - type: object - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: []