diff --git a/.github/workflows/official-release.yml b/.github/workflows/official-release.yml index 9c2e495..f8863de 100644 --- a/.github/workflows/official-release.yml +++ b/.github/workflows/official-release.yml @@ -6,7 +6,7 @@ on: - main - development tags: - - "containerscale-controller/v*" + - "podscale-controller/v*" - "pod-autoscaler/v*" jobs: diff --git a/.gitignore b/.gitignore index c09ad2f..d83b1a2 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,6 @@ vendor/ # binaries pkg/*/pod-autoscaler pkg/*/pod-replicas-updater -pkg/*/containerscale-controller +pkg/*/podscale-controller pkg/*/http-metrics pkg/*/metrics-exposer diff --git a/Makefile b/Makefile index 55df3b3..460b828 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ MAKEFLAGS += --no-print-directory -COMPONENTS = pod-replicas-updater pod-autoscaler containerscale-controller +COMPONENTS = pod-replicas-updater pod-autoscaler podscale-controller ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin @@ -12,7 +12,7 @@ endif all: build test coverage manifests release clean # Build binary -build: fmt manifests test vet +build: fmt test vet $(call action, build) coverage: @@ -29,7 +29,7 @@ fmt: install: install-crds install-rbac -install-crds: manifests +install-crds: @echo "install CRDs manifests" @kubectl apply -f config/crd/bases diff --git a/README.md b/README.md index de44ac7..7042d99 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ The controllers are freely inspired from [sample-controller](https://github.com/ - [Contention Manager](pkg/contention-manager/README.md) - [Pod Replicas Updater](pkg/pod-replicas-updater/README.md) - [Pod Resource Updater](pkg/pod-resource-updater/README.md) -- [Containerscale Controller](pkg/containerscale-controller/README.md) +- [PodScale Controller](pkg/podscale-controller/README.md) - [Recommender](pkg/recommender/README.md) # CRDs code generation diff --git a/artifacts/example-podscale.yaml b/artifacts/example-podscale.yaml index 0663f26..77fd479 100644 --- a/artifacts/example-podscale.yaml +++ b/artifacts/example-podscale.yaml @@ -1,7 +1,7 @@ apiVersion: systemautoscaler.polimi.it/v1beta1 -kind: ContainerScale +kind: PodScale metadata: - name: example-containerscale + name: example-podscale spec: serviceLevelAgreement: name: example-sla diff --git a/config/crd/bases/systemautoscaler.polimi.it_containerscales.yaml b/config/crd/bases/systemautoscaler.polimi.it_podscales.yaml similarity index 68% rename from config/crd/bases/systemautoscaler.polimi.it_containerscales.yaml rename to config/crd/bases/systemautoscaler.polimi.it_podscales.yaml index 241dd1c..bb3a06a 100644 --- a/config/crd/bases/systemautoscaler.polimi.it_containerscales.yaml +++ b/config/crd/bases/systemautoscaler.polimi.it_podscales.yaml @@ -6,21 +6,21 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.3.0 creationTimestamp: null - name: containerscales.systemautoscaler.polimi.it + name: podscales.systemautoscaler.polimi.it spec: group: systemautoscaler.polimi.it names: - kind: ContainerScale - listKind: ContainerScaleList - plural: containerscales - singular: containerscale + kind: PodScale + listKind: PodScaleList + plural: podscales + singular: podscale preserveUnknownFields: false scope: Namespaced versions: - name: v1beta1 schema: openAPIV3Schema: - description: ContainerScale defines the mapping between a `ServiceLevelAgreement` + description: PodScale defines the mapping between a `ServiceLevelAgreement` and a `Pod` matching the selector. 
It also keeps track of the resource values computed by `Recommender` and adjusted by `Contention Manager`. properties: @@ -37,7 +37,7 @@ spec: metadata: type: object spec: - description: ContainerScaleSpec is the spec for a ContainerScale resource + description: PodScaleSpec is the spec for a PodScale resource properties: container: type: string @@ -50,49 +50,24 @@ spec: x-kubernetes-int-or-string: true description: ResourceList is a set of (resource name, quantity) pairs. type: object + namespace: + type: string pod: - description: PodRef is a reference to a pod - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object + type: string service: - description: ServiceRef is a reference to a service - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object + type: string serviceLevelAgreement: - description: SLARef is a reference to a service level agreement - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object + type: string required: - container + - namespace - pod - service - serviceLevelAgreement type: object status: - description: ContainerScaleStatus contains the resources patched by the - `Contention Manager` according to the available node resources and other - pods' SLA + description: PodScaleStatus contains the resources patched by the `Contention + Manager` according to the available node resources and other pods' SLA properties: actual: additionalProperties: diff --git a/config/permissions/podscale-controller.yaml b/config/permissions/podscale-controller.yaml index cfe09ee..4fa314f 100644 --- a/config/permissions/podscale-controller.yaml +++ b/config/permissions/podscale-controller.yaml @@ -1,14 +1,14 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: containerscale-controller + name: podscale-controller namespace: kube-system automountServiceAccountToken: false --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: system-autoscaler:containerscale-manager + name: system-autoscaler:podscale-manager rules: - apiGroups: [""] resources: ["pods", "services", "nodes"] @@ -20,19 +20,19 @@ rules: resources: ["servicelevelagreements"] verbs: ["get", "watch", "list"] - apiGroups: ["systemautoscaler.polimi.it"] - resources: ["containerscales"] + resources: ["podscales"] verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: system-autoscaler:containerscale-manager + name: system-autoscaler:podscale-manager subjects: - kind: ServiceAccount - name: containerscale-controller + name: podscale-controller namespace: kube-system apiGroup: "" roleRef: kind: ClusterRole - name: system-autoscaler:containerscale-manager + name: system-autoscaler:podscale-manager apiGroup: rbac.authorization.k8s.io diff --git a/examples/benchmark/deploy.sh b/examples/benchmark/deploy.sh index 7a5449d..c2ef92c 100644 --- a/examples/benchmark/deploy.sh +++ b/examples/benchmark/deploy.sh @@ -6,6 +6,6 @@ kubectl apply -f application kubectl apply -f metrics kubectl apply -f system-autoscaler -kubectl cp kube-system/pod-autoscaler-578b5988c8-52brq:var/containerscale.json containerscale.json -c pod-autoscaler +kubectl cp kube-system/pod-autoscaler-578b5988c8-52brq:var/podscale.json podscale.json -c pod-autoscaler kubectl exec postgres-statefulset-0 -- psql -d awesomedb -U amazinguser -c "\copy response_information to /response.csv delimiter ',' csv header;" kubectl cp 
postgres-statefulset-0:response.csv response.csv diff --git a/examples/benchmark/monitoring/monitoring.yaml b/examples/benchmark/monitoring/monitoring.yaml index 7f09fed..228f949 100644 --- a/examples/benchmark/monitoring/monitoring.yaml +++ b/examples/benchmark/monitoring/monitoring.yaml @@ -20,7 +20,7 @@ rules: resources: ["servicelevelagreements"] verbs: ["get", "watch", "list"] - apiGroups: ["systemautoscaler.polimi.it"] - resources: ["containerscales"] + resources: ["podscales"] verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/examples/benchmark/system-autoscaler/containerscale-controller.yaml b/examples/benchmark/system-autoscaler/containerscale-controller.yaml index 54d9507..46ccb39 100644 --- a/examples/benchmark/system-autoscaler/containerscale-controller.yaml +++ b/examples/benchmark/system-autoscaler/containerscale-controller.yaml @@ -1,14 +1,14 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: containerscale-controller + name: podscale-controller namespace: kube-system automountServiceAccountToken: false --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: system-autoscaler:containerscale-controller + name: system-autoscaler:podscale-controller rules: - apiGroups: [""] resources: ["pods", "services", "nodes"] @@ -20,45 +20,45 @@ rules: resources: ["servicelevelagreements"] verbs: ["get", "watch", "list"] - apiGroups: ["systemautoscaler.polimi.it"] - resources: ["containerscales"] + resources: ["podscales"] verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: system-autoscaler:containerscale-controller + name: system-autoscaler:podscale-controller subjects: - kind: ServiceAccount - name: containerscale-controller + name: podscale-controller namespace: kube-system apiGroup: "" roleRef: kind: ClusterRole - name: system-autoscaler:containerscale-controller + name: system-autoscaler:podscale-controller apiGroup: rbac.authorization.k8s.io --- apiVersion: apps/v1 kind: Deployment metadata: - name: containerscale-controller + name: podscale-controller namespace: kube-system spec: strategy: type: Recreate selector: matchLabels: - app: containerscale-controller + app: podscale-controller replicas: 1 template: metadata: labels: - app: containerscale-controller + app: podscale-controller spec: automountServiceAccountToken: true - serviceAccountName: containerscale-controller + serviceAccountName: podscale-controller containers: - - name: containerscale-controller - image: systemautoscaler/containerscale-controller:0.1.0 + - name: podscale-controller + image: systemautoscaler/podscale-controller:0.1.0 imagePullPolicy: Always resources: limits: diff --git a/examples/benchmark/system-autoscaler/pod-autoscaler.yaml b/examples/benchmark/system-autoscaler/pod-autoscaler.yaml index 3131343..8d748c9 100644 --- a/examples/benchmark/system-autoscaler/pod-autoscaler.yaml +++ b/examples/benchmark/system-autoscaler/pod-autoscaler.yaml @@ -20,7 +20,7 @@ rules: resources: ["servicelevelagreements"] verbs: ["get", "watch", "list"] - apiGroups: ["systemautoscaler.polimi.it"] - resources: ["containerscales"] + resources: ["podscales"] verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/examples/podscale-controller.yaml b/examples/podscale-controller.yaml index 4b73264..b5b31a3 100644 --- a/examples/podscale-controller.yaml +++ b/examples/podscale-controller.yaml @@ -1,25 +1,25 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: containerscale-controller + name: 
podscale-controller namespace: kube-system spec: strategy: type: Recreate selector: matchLabels: - app: containerscale-controller + app: podscale-controller replicas: 1 template: metadata: labels: - app: containerscale-controller + app: podscale-controller spec: automountServiceAccountToken: true - serviceAccountName: containerscale-controller + serviceAccountName: podscale-controller containers: - - name: containerscale-controller - image: systemautoscaler/containerscale-controller:0.1.0 + - name: podscale-controller + image: systemautoscaler/podscale-controller:0.1.0 imagePullPolicy: Always ports: - containerPort: 443 diff --git a/pkg/apis/systemautoscaler/v1beta1/register.go b/pkg/apis/systemautoscaler/v1beta1/register.go index bfdef0c..eb677a7 100644 --- a/pkg/apis/systemautoscaler/v1beta1/register.go +++ b/pkg/apis/systemautoscaler/v1beta1/register.go @@ -32,8 +32,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ServiceLevelAgreement{}, &ServiceLevelAgreementList{}, - &ContainerScale{}, - &ContainerScaleList{}, + &PodScale{}, + &PodScaleList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/pkg/apis/systemautoscaler/v1beta1/types.go b/pkg/apis/systemautoscaler/v1beta1/types.go index 4891887..91d6620 100644 --- a/pkg/apis/systemautoscaler/v1beta1/types.go +++ b/pkg/apis/systemautoscaler/v1beta1/types.go @@ -87,59 +87,42 @@ type MetricRequirement struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ContainerScaleList is a list of ContainerScale resources -type ContainerScaleList struct { +// PodScaleList is a list of PodScale resources +type PodScaleList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` - Items []ContainerScale `json:"items"` + Items []PodScale `json:"items"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ContainerScale defines the mapping between a `ServiceLevelAgreement` and a +// PodScale defines the mapping between a `ServiceLevelAgreement` and a // `Pod` matching the selector. It also keeps track of the resource values // computed by `Recommender` and adjusted by `Contention Manager`. 
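The hunk that follows flattens the spec: the nested PodRef, SLARef, and ServiceRef structs are replaced by plain name strings plus one shared Namespace field, so a PodScale's pod, service, and SLA are all assumed to live in the same namespace. A minimal sketch of constructing an object against the new shape (names and quantities are illustrative, loosely following artifacts/example-podscale.yaml; corev1 is k8s.io/api/core/v1, resource is k8s.io/apimachinery/pkg/api/resource, metav1 is k8s.io/apimachinery/pkg/apis/meta/v1):

    example := &v1beta1.PodScale{
        ObjectMeta: metav1.ObjectMeta{Name: "example-podscale", Namespace: "default"},
        Spec: v1beta1.PodScaleSpec{
            Namespace: "default",     // one namespace shared by pod, service, and SLA
            SLA:       "example-sla", // serialized as `serviceLevelAgreement`
            Pod:       "example-pod",
            Service:   "example-service",
            Container: "example-container",
            DesiredResources: corev1.ResourceList{
                corev1.ResourceCPU:    resource.MustParse("100m"),
                corev1.ResourceMemory: resource.MustParse("128Mi"),
            },
        },
    }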
-type ContainerScale struct { +type PodScale struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ContainerScaleSpec `json:"spec"` - Status ContainerScaleStatus `json:"status"` + Spec PodScaleSpec `json:"spec"` + Status PodScaleStatus `json:"status"` } -// ContainerScaleSpec is the spec for a ContainerScale resource -type ContainerScaleSpec struct { - SLARef SLARef `json:"serviceLevelAgreement"` - PodRef PodRef `json:"pod"` - ServiceRef ServiceRef `json:"service"` +// PodScaleSpec is the spec for a PodScale resource +type PodScaleSpec struct { + Namespace string `json:"namespace"` + SLA string `json:"serviceLevelAgreement"` + Pod string `json:"pod"` + Service string `json:"service"` Container string `json:"container"` DesiredResources v1.ResourceList `json:"desired,omitempty" protobuf:"bytes,3,rep,name=desired,casttype=ResourceList,castkey=ResourceName"` } -// PodRef is a reference to a pod -type PodRef struct { - Name string `json:"name"` - Namespace string `json:"namespace"` -} - -// SLARef is a reference to a service level agreement -type SLARef struct { - Name string `json:"name"` - Namespace string `json:"namespace"` -} - -// ServiceRef is a reference to a service -type ServiceRef struct { - Name string `json:"name"` - Namespace string `json:"namespace"` -} - -// ContainerScaleStatus contains the resources patched by the +// PodScaleStatus contains the resources patched by the // `Contention Manager` according to the available node resources // and other pods' SLA -type ContainerScaleStatus struct { +type PodScaleStatus struct { CappedResources v1.ResourceList `json:"capped,omitempty" protobuf:"bytes,3,rep,name=actual,casttype=ResourceList,castkey=ResourceName"` ActualResources v1.ResourceList `json:"actual,omitempty" protobuf:"bytes,3,rep,name=actual,casttype=ResourceList,castkey=ResourceName"` } diff --git a/pkg/apis/systemautoscaler/v1beta1/zz_generated.deepcopy.go b/pkg/apis/systemautoscaler/v1beta1/zz_generated.deepcopy.go index fec3060..4767f74 100644 --- a/pkg/apis/systemautoscaler/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/systemautoscaler/v1beta1/zz_generated.deepcopy.go @@ -11,7 +11,24 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerScale) DeepCopyInto(out *ContainerScale) { +func (in *MetricRequirement) DeepCopyInto(out *MetricRequirement) { + *out = *in + out.ResponseTime = in.ResponseTime.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricRequirement. +func (in *MetricRequirement) DeepCopy() *MetricRequirement { + if in == nil { + return nil + } + out := new(MetricRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodScale) DeepCopyInto(out *PodScale) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -20,18 +37,18 @@ func (in *ContainerScale) DeepCopyInto(out *ContainerScale) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerScale. -func (in *ContainerScale) DeepCopy() *ContainerScale { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScale. 
+func (in *PodScale) DeepCopy() *PodScale { if in == nil { return nil } - out := new(ContainerScale) + out := new(PodScale) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ContainerScale) DeepCopyObject() runtime.Object { +func (in *PodScale) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -39,13 +56,13 @@ func (in *ContainerScale) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerScaleList) DeepCopyInto(out *ContainerScaleList) { +func (in *PodScaleList) DeepCopyInto(out *PodScaleList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]ContainerScale, len(*in)) + *out = make([]PodScale, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -53,18 +70,18 @@ func (in *ContainerScaleList) DeepCopyInto(out *ContainerScaleList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerScaleList. -func (in *ContainerScaleList) DeepCopy() *ContainerScaleList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScaleList. +func (in *PodScaleList) DeepCopy() *PodScaleList { if in == nil { return nil } - out := new(ContainerScaleList) + out := new(PodScaleList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ContainerScaleList) DeepCopyObject() runtime.Object { +func (in *PodScaleList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -72,10 +89,8 @@ func (in *ContainerScaleList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerScaleSpec) DeepCopyInto(out *ContainerScaleSpec) { +func (in *PodScaleSpec) DeepCopyInto(out *PodScaleSpec) { *out = *in - out.SLARef = in.SLARef - out.PodRef = in.PodRef if in.DesiredResources != nil { in, out := &in.DesiredResources, &out.DesiredResources *out = make(v1.ResourceList, len(*in)) @@ -86,19 +101,26 @@ func (in *ContainerScaleSpec) DeepCopyInto(out *ContainerScaleSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerScaleSpec. -func (in *ContainerScaleSpec) DeepCopy() *ContainerScaleSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScaleSpec. +func (in *PodScaleSpec) DeepCopy() *PodScaleSpec { if in == nil { return nil } - out := new(ContainerScaleSpec) + out := new(PodScaleSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ContainerScaleStatus) DeepCopyInto(out *ContainerScaleStatus) { +func (in *PodScaleStatus) DeepCopyInto(out *PodScaleStatus) { *out = *in + if in.CappedResources != nil { + in, out := &in.CappedResources, &out.CappedResources + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } if in.ActualResources != nil { in, out := &in.ActualResources, &out.ActualResources *out = make(v1.ResourceList, len(*in)) @@ -109,61 +131,12 @@ func (in *ContainerScaleStatus) DeepCopyInto(out *ContainerScaleStatus) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerScaleStatus. -func (in *ContainerScaleStatus) DeepCopy() *ContainerScaleStatus { - if in == nil { - return nil - } - out := new(ContainerScaleStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricRequirement) DeepCopyInto(out *MetricRequirement) { - *out = *in - out.ResponseTime = in.ResponseTime.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricRequirement. -func (in *MetricRequirement) DeepCopy() *MetricRequirement { - if in == nil { - return nil - } - out := new(MetricRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodRef) DeepCopyInto(out *PodRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodRef. -func (in *PodRef) DeepCopy() *PodRef { - if in == nil { - return nil - } - out := new(PodRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SLARef) DeepCopyInto(out *SLARef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SLARef. -func (in *SLARef) DeepCopy() *SLARef { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScaleStatus. +func (in *PodScaleStatus) DeepCopy() *PodScaleStatus { if in == nil { return nil } - out := new(SLARef) + out := new(PodScaleStatus) in.DeepCopyInto(out) return out } diff --git a/pkg/containerscale-controller/Dockerfile b/pkg/containerscale-controller/Dockerfile deleted file mode 100644 index d43568f..0000000 --- a/pkg/containerscale-controller/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM gcr.io/distroless/static:nonroot - -LABEL name="ContainerScale Controller" - -COPY containerscale-controller /usr/local/bin/ - -CMD ["containerscale-controller"] diff --git a/pkg/containerscale-controller/README.md b/pkg/containerscale-controller/README.md deleted file mode 100644 index 02ff416..0000000 --- a/pkg/containerscale-controller/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# ContainerScale Controller - -ContainerScale Controller manages `ContainerScale` resources lifecycle, dealing with their creation and deletion. - -## ContainerScale lifecyle - -Once a new `ServiceLevelAgreement` is deployed into a namespace, the controller will try to find a set of `Services` compatible with the `serviceSelector` and will create a new `ContainerScale` for each `Pod`. 
The match is currently done by setting the `MatchLabels` field inside the Selector but a further analysis has to be done regarding the `Selector` strategy since the `MatchExpressions` will not be used. -After the `ContainerScale` creation, the controller will try to keep the set of `ContainerScale` up to date with `Pod` resources, handling changes in the number of replicas and `Pod` deletions. What is not covered at the moment is specified in this [issue] (https://github.com/lterrac/system-autoscaler/issues/2). -When the `ServiceLevelAgreement` is deleted from the namespace, all the `ContainerScale` resources generated from it will be also deleted, leaving the namespace as it was before introducing the Agreement. diff --git a/pkg/containerscale-controller/pkg/types/types.go b/pkg/containerscale-controller/pkg/types/types.go deleted file mode 100644 index d0b1369..0000000 --- a/pkg/containerscale-controller/pkg/types/types.go +++ /dev/null @@ -1,35 +0,0 @@ -package types - -import ( - "fmt" - "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" -) - -// NodeScales is used to group containerscales by node. -type NodeScales struct { - Node string - ContainerScales []*v1beta1.ContainerScale -} - -func (n *NodeScales) Contains(name, namespace string) bool { - for _, containerscale := range n.ContainerScales { - podReference := containerscale.Spec.PodRef - if podReference.Namespace == namespace && - podReference.Name == name { - return true - } - } - return false -} - -func (n *NodeScales) Remove(name, namespace string) (*v1beta1.ContainerScale, error) { - for i, containerscale := range n.ContainerScales { - podReference := containerscale.Spec.PodRef - if podReference.Namespace == name && - podReference.Name == namespace { - n.ContainerScales = append(n.ContainerScales[:i], n.ContainerScales[i+1:]...) - return containerscale, nil - } - } - return nil, fmt.Errorf("error: missing %#v-%#v in node %#v", namespace, name, n.Node) -} diff --git a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/containerscale.go b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/containerscale.go deleted file mode 100644 index 79ba8c5..0000000 --- a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/containerscale.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - scheme "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ContainerScalesGetter has a method to return a ContainerScaleInterface. -// A group's client should implement this interface. -type ContainerScalesGetter interface { - ContainerScales(namespace string) ContainerScaleInterface -} - -// ContainerScaleInterface has methods to work with ContainerScale resources. 
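The deleted types.go above also removes the NodeScales helper; note that its Remove compared podReference.Namespace against name and podReference.Name against namespace, with the two operands swapped relative to Contains. A corrected sketch of the same helper against the flattened spec, should it be reintroduced (assumed, not part of this diff; needs fmt and the v1beta1 package):

    // NodeScales groups podscales by node.
    type NodeScales struct {
        Node      string
        PodScales []*v1beta1.PodScale
    }

    func (n *NodeScales) Remove(name, namespace string) (*v1beta1.PodScale, error) {
        for i, podscale := range n.PodScales {
            // match name against Spec.Pod and namespace against Spec.Namespace,
            // the way Contains already did
            if podscale.Spec.Namespace == namespace && podscale.Spec.Pod == name {
                n.PodScales = append(n.PodScales[:i], n.PodScales[i+1:]...)
                return podscale, nil
            }
        }
        return nil, fmt.Errorf("error: missing %s/%s in node %s", namespace, name, n.Node)
    }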
-type ContainerScaleInterface interface { - Create(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.CreateOptions) (*v1beta1.ContainerScale, error) - Update(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.UpdateOptions) (*v1beta1.ContainerScale, error) - UpdateStatus(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.UpdateOptions) (*v1beta1.ContainerScale, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ContainerScale, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ContainerScaleList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ContainerScale, err error) - ContainerScaleExpansion -} - -// containerScales implements ContainerScaleInterface -type containerScales struct { - client rest.Interface - ns string -} - -// newContainerScales returns a ContainerScales -func newContainerScales(c *SystemautoscalerV1beta1Client, namespace string) *containerScales { - return &containerScales{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the containerScale, and returns the corresponding containerScale object, and an error if there is any. -func (c *containerScales) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ContainerScale, err error) { - result = &v1beta1.ContainerScale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("containerscales"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ContainerScales that match those selectors. -func (c *containerScales) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ContainerScaleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ContainerScaleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("containerscales"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested containerScales. -func (c *containerScales) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("containerscales"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a containerScale and creates it. Returns the server's representation of the containerScale, and an error, if there is any. -func (c *containerScales) Create(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.CreateOptions) (result *v1beta1.ContainerScale, err error) { - result = &v1beta1.ContainerScale{} - err = c.client.Post(). - Namespace(c.ns). - Resource("containerscales"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(containerScale). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a containerScale and updates it. Returns the server's representation of the containerScale, and an error, if there is any. -func (c *containerScales) Update(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.UpdateOptions) (result *v1beta1.ContainerScale, err error) { - result = &v1beta1.ContainerScale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("containerscales"). - Name(containerScale.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(containerScale). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *containerScales) UpdateStatus(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.UpdateOptions) (result *v1beta1.ContainerScale, err error) { - result = &v1beta1.ContainerScale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("containerscales"). - Name(containerScale.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(containerScale). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the containerScale and deletes it. Returns an error if one occurs. -func (c *containerScales) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("containerscales"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *containerScales) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("containerscales"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched containerScale. -func (c *containerScales) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ContainerScale, err error) { - result = &v1beta1.ContainerScale{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("containerscales"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_containerscale.go b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_containerscale.go deleted file mode 100644 index e5957a5..0000000 --- a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_containerscale.go +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeContainerScales implements ContainerScaleInterface -type FakeContainerScales struct { - Fake *FakeSystemautoscalerV1beta1 - ns string -} - -var containerscalesResource = schema.GroupVersionResource{Group: "systemautoscaler.polimi.it", Version: "v1beta1", Resource: "containerscales"} - -var containerscalesKind = schema.GroupVersionKind{Group: "systemautoscaler.polimi.it", Version: "v1beta1", Kind: "ContainerScale"} - -// Get takes name of the containerScale, and returns the corresponding containerScale object, and an error if there is any. -func (c *FakeContainerScales) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ContainerScale, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(containerscalesResource, c.ns, name), &v1beta1.ContainerScale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ContainerScale), err -} - -// List takes label and field selectors, and returns the list of ContainerScales that match those selectors. -func (c *FakeContainerScales) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ContainerScaleList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(containerscalesResource, containerscalesKind, c.ns, opts), &v1beta1.ContainerScaleList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ContainerScaleList{ListMeta: obj.(*v1beta1.ContainerScaleList).ListMeta} - for _, item := range obj.(*v1beta1.ContainerScaleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested containerScales. -func (c *FakeContainerScales) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(containerscalesResource, c.ns, opts)) - -} - -// Create takes the representation of a containerScale and creates it. Returns the server's representation of the containerScale, and an error, if there is any. -func (c *FakeContainerScales) Create(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.CreateOptions) (result *v1beta1.ContainerScale, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(containerscalesResource, c.ns, containerScale), &v1beta1.ContainerScale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ContainerScale), err -} - -// Update takes the representation of a containerScale and updates it. Returns the server's representation of the containerScale, and an error, if there is any. -func (c *FakeContainerScales) Update(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.UpdateOptions) (result *v1beta1.ContainerScale, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(containerscalesResource, c.ns, containerScale), &v1beta1.ContainerScale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ContainerScale), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeContainerScales) UpdateStatus(ctx context.Context, containerScale *v1beta1.ContainerScale, opts v1.UpdateOptions) (*v1beta1.ContainerScale, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(containerscalesResource, "status", c.ns, containerScale), &v1beta1.ContainerScale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ContainerScale), err -} - -// Delete takes name of the containerScale and deletes it. Returns an error if one occurs. -func (c *FakeContainerScales) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(containerscalesResource, c.ns, name), &v1beta1.ContainerScale{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeContainerScales) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(containerscalesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ContainerScaleList{}) - return err -} - -// Patch applies the patch and returns the patched containerScale. -func (c *FakeContainerScales) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ContainerScale, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(containerscalesResource, c.ns, name, pt, data, subresources...), &v1beta1.ContainerScale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ContainerScale), err -} diff --git a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_podscale.go b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_podscale.go new file mode 100644 index 0000000..7646e5a --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_podscale.go @@ -0,0 +1,126 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePodScales implements PodScaleInterface +type FakePodScales struct { + Fake *FakeSystemautoscalerV1beta1 + ns string +} + +var podscalesResource = schema.GroupVersionResource{Group: "systemautoscaler.polimi.it", Version: "v1beta1", Resource: "podscales"} + +var podscalesKind = schema.GroupVersionKind{Group: "systemautoscaler.polimi.it", Version: "v1beta1", Kind: "PodScale"} + +// Get takes name of the podScale, and returns the corresponding podScale object, and an error if there is any. +func (c *FakePodScales) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodScale, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(podscalesResource, c.ns, name), &v1beta1.PodScale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodScale), err +} + +// List takes label and field selectors, and returns the list of PodScales that match those selectors. +func (c *FakePodScales) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodScaleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podscalesResource, podscalesKind, c.ns, opts), &v1beta1.PodScaleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.PodScaleList{ListMeta: obj.(*v1beta1.PodScaleList).ListMeta} + for _, item := range obj.(*v1beta1.PodScaleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podScales. +func (c *FakePodScales) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podscalesResource, c.ns, opts)) + +} + +// Create takes the representation of a podScale and creates it. Returns the server's representation of the podScale, and an error, if there is any. +func (c *FakePodScales) Create(ctx context.Context, podScale *v1beta1.PodScale, opts v1.CreateOptions) (result *v1beta1.PodScale, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podscalesResource, c.ns, podScale), &v1beta1.PodScale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodScale), err +} + +// Update takes the representation of a podScale and updates it. Returns the server's representation of the podScale, and an error, if there is any. +func (c *FakePodScales) Update(ctx context.Context, podScale *v1beta1.PodScale, opts v1.UpdateOptions) (result *v1beta1.PodScale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podscalesResource, c.ns, podScale), &v1beta1.PodScale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodScale), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePodScales) UpdateStatus(ctx context.Context, podScale *v1beta1.PodScale, opts v1.UpdateOptions) (*v1beta1.PodScale, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podscalesResource, "status", c.ns, podScale), &v1beta1.PodScale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodScale), err +} + +// Delete takes name of the podScale and deletes it. Returns an error if one occurs. +func (c *FakePodScales) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podscalesResource, c.ns, name), &v1beta1.PodScale{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodScales) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podscalesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.PodScaleList{}) + return err +} + +// Patch applies the patch and returns the patched podScale. 
+func (c *FakePodScales) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodScale, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podscalesResource, c.ns, name, pt, data, subresources...), &v1beta1.PodScale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodScale), err +} diff --git a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_systemautoscaler_client.go b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_systemautoscaler_client.go index 9d31a52..6bb6cc1 100644 --- a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_systemautoscaler_client.go +++ b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/fake/fake_systemautoscaler_client.go @@ -12,8 +12,8 @@ type FakeSystemautoscalerV1beta1 struct { *testing.Fake } -func (c *FakeSystemautoscalerV1beta1) ContainerScales(namespace string) v1beta1.ContainerScaleInterface { - return &FakeContainerScales{c, namespace} +func (c *FakeSystemautoscalerV1beta1) PodScales(namespace string) v1beta1.PodScaleInterface { + return &FakePodScales{c, namespace} } func (c *FakeSystemautoscalerV1beta1) ServiceLevelAgreements(namespace string) v1beta1.ServiceLevelAgreementInterface { diff --git a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/generated_expansion.go index aedc304..329c60c 100644 --- a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/generated_expansion.go @@ -2,6 +2,6 @@ package v1beta1 -type ContainerScaleExpansion interface{} +type PodScaleExpansion interface{} type ServiceLevelAgreementExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/podscale.go b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/podscale.go new file mode 100644 index 0000000..e72b03c --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/podscale.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" + scheme "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodScalesGetter has a method to return a PodScaleInterface. +// A group's client should implement this interface. +type PodScalesGetter interface { + PodScales(namespace string) PodScaleInterface +} + +// PodScaleInterface has methods to work with PodScale resources. 
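The regenerated fake above plugs into unit tests through the clientset's fake package; a minimal sketch, assuming client-gen also emits the usual NewSimpleClientset constructor alongside these files:

    package fake_test

    import (
        "context"
        "testing"

        "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1"
        "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned/fake"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func TestPodScaleRoundTrip(t *testing.T) {
        client := fake.NewSimpleClientset()
        ps := &v1beta1.PodScale{ObjectMeta: metav1.ObjectMeta{Name: "example-podscale", Namespace: "default"}}
        if _, err := client.SystemautoscalerV1beta1().PodScales("default").Create(context.TODO(), ps, metav1.CreateOptions{}); err != nil {
            t.Fatal(err)
        }
        got, err := client.SystemautoscalerV1beta1().PodScales("default").Get(context.TODO(), "example-podscale", metav1.GetOptions{})
        if err != nil || got.Name != ps.Name {
            t.Fatalf("round trip failed: got %v, err %v", got, err)
        }
    }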
+type PodScaleInterface interface { + Create(ctx context.Context, podScale *v1beta1.PodScale, opts v1.CreateOptions) (*v1beta1.PodScale, error) + Update(ctx context.Context, podScale *v1beta1.PodScale, opts v1.UpdateOptions) (*v1beta1.PodScale, error) + UpdateStatus(ctx context.Context, podScale *v1beta1.PodScale, opts v1.UpdateOptions) (*v1beta1.PodScale, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodScale, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodScaleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodScale, err error) + PodScaleExpansion +} + +// podScales implements PodScaleInterface +type podScales struct { + client rest.Interface + ns string +} + +// newPodScales returns a PodScales +func newPodScales(c *SystemautoscalerV1beta1Client, namespace string) *podScales { + return &podScales{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podScale, and returns the corresponding podScale object, and an error if there is any. +func (c *podScales) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodScale, err error) { + result = &v1beta1.PodScale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podscales"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodScales that match those selectors. +func (c *podScales) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodScaleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PodScaleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podscales"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podScales. +func (c *podScales) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podscales"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a podScale and creates it. Returns the server's representation of the podScale, and an error, if there is any. +func (c *podScales) Create(ctx context.Context, podScale *v1beta1.PodScale, opts v1.CreateOptions) (result *v1beta1.PodScale, err error) { + result = &v1beta1.PodScale{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podscales"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podScale). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podScale and updates it. Returns the server's representation of the podScale, and an error, if there is any. 
+func (c *podScales) Update(ctx context.Context, podScale *v1beta1.PodScale, opts v1.UpdateOptions) (result *v1beta1.PodScale, err error) { + result = &v1beta1.PodScale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podscales"). + Name(podScale.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podScale). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *podScales) UpdateStatus(ctx context.Context, podScale *v1beta1.PodScale, opts v1.UpdateOptions) (result *v1beta1.PodScale, err error) { + result = &v1beta1.PodScale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podscales"). + Name(podScale.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podScale). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podScale and deletes it. Returns an error if one occurs. +func (c *podScales) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podscales"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podScales) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podscales"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podScale. +func (c *podScales) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodScale, err error) { + result = &v1beta1.PodScale{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podscales"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/systemautoscaler_client.go b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/systemautoscaler_client.go index d236de4..b62c8d6 100644 --- a/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/systemautoscaler_client.go +++ b/pkg/generated/clientset/versioned/typed/systemautoscaler/v1beta1/systemautoscaler_client.go @@ -10,7 +10,7 @@ import ( type SystemautoscalerV1beta1Interface interface { RESTClient() rest.Interface - ContainerScalesGetter + PodScalesGetter ServiceLevelAgreementsGetter } @@ -19,8 +19,8 @@ type SystemautoscalerV1beta1Client struct { restClient rest.Interface } -func (c *SystemautoscalerV1beta1Client) ContainerScales(namespace string) ContainerScaleInterface { - return newContainerScales(c, namespace) +func (c *SystemautoscalerV1beta1Client) PodScales(namespace string) PodScaleInterface { + return newPodScales(c, namespace) } func (c *SystemautoscalerV1beta1Client) ServiceLevelAgreements(namespace string) ServiceLevelAgreementInterface { diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 95a3134..9646717 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -37,8 +37,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=systemautoscaler.polimi.it, Version=v1beta1 - case v1beta1.SchemeGroupVersion.WithResource("containerscales"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Systemautoscaler().V1beta1().ContainerScales().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("podscales"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Systemautoscaler().V1beta1().PodScales().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("servicelevelagreements"): return &genericInformer{resource: resource.GroupResource(), informer: f.Systemautoscaler().V1beta1().ServiceLevelAgreements().Informer()}, nil diff --git a/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/containerscale.go b/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/containerscale.go deleted file mode 100644 index 47e35d0..0000000 --- a/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/containerscale.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - time "time" - - systemautoscalerv1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - versioned "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" - internalinterfaces "github.com/lterrac/system-autoscaler/pkg/generated/informers/externalversions/internalinterfaces" - v1beta1 "github.com/lterrac/system-autoscaler/pkg/generated/listers/systemautoscaler/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// ContainerScaleInformer provides access to a shared informer and lister for -// ContainerScales. 
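With the client wiring above, callers reach the renamed resource through SystemautoscalerV1beta1().PodScales(namespace). A sketch of listing podscales from inside the cluster, assuming the versioned package exposes the standard client-gen NewForConfig constructor:

    package main

    import (
        "context"
        "fmt"

        versioned "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/rest"
    )

    func main() {
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        client, err := versioned.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        podscales, err := client.SystemautoscalerV1beta1().PodScales("kube-system").List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        for _, ps := range podscales.Items {
            fmt.Printf("%s tracks pod %s/%s\n", ps.Name, ps.Spec.Namespace, ps.Spec.Pod)
        }
    }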
-type ContainerScaleInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.ContainerScaleLister -} - -type containerScaleInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewContainerScaleInformer constructs a new informer for ContainerScale type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewContainerScaleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredContainerScaleInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredContainerScaleInformer constructs a new informer for ContainerScale type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredContainerScaleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SystemautoscalerV1beta1().ContainerScales(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SystemautoscalerV1beta1().ContainerScales(namespace).Watch(context.TODO(), options) - }, - }, - &systemautoscalerv1beta1.ContainerScale{}, - resyncPeriod, - indexers, - ) -} - -func (f *containerScaleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredContainerScaleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *containerScaleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&systemautoscalerv1beta1.ContainerScale{}, f.defaultInformer) -} - -func (f *containerScaleInformer) Lister() v1beta1.ContainerScaleLister { - return v1beta1.NewContainerScaleLister(f.Informer().GetIndexer()) -} diff --git a/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/interface.go b/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/interface.go index 8f4771f..f6eeecd 100644 --- a/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/interface.go +++ b/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/interface.go @@ -8,8 +8,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // ContainerScales returns a ContainerScaleInformer. - ContainerScales() ContainerScaleInformer + // PodScales returns a PodScaleInformer. + PodScales() PodScaleInformer // ServiceLevelAgreements returns a ServiceLevelAgreementInformer. 
ServiceLevelAgreements() ServiceLevelAgreementInformer } @@ -25,9 +25,9 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// ContainerScales returns a ContainerScaleInformer. -func (v *version) ContainerScales() ContainerScaleInformer { - return &containerScaleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +// PodScales returns a PodScaleInformer. +func (v *version) PodScales() PodScaleInformer { + return &podScaleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // ServiceLevelAgreements returns a ServiceLevelAgreementInformer. diff --git a/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/podscale.go b/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/podscale.go new file mode 100644 index 0000000..57b4c03 --- /dev/null +++ b/pkg/generated/informers/externalversions/systemautoscaler/v1beta1/podscale.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + systemautoscalerv1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" + versioned "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" + internalinterfaces "github.com/lterrac/system-autoscaler/pkg/generated/informers/externalversions/internalinterfaces" + v1beta1 "github.com/lterrac/system-autoscaler/pkg/generated/listers/systemautoscaler/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PodScaleInformer provides access to a shared informer and lister for +// PodScales. +type PodScaleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.PodScaleLister +} + +type podScaleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodScaleInformer constructs a new informer for PodScale type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodScaleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodScaleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodScaleInformer constructs a new informer for PodScale type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
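A minimal sketch of the factory-based usage the generated comment above recommends, assuming a versioned clientset saClient, a stop channel stopCh, and a 30-second resync period (all three are illustrative assumptions, not part of this diff):

	// Obtain the shared PodScale informer through the generated factory
	// rather than constructing an independent one.
	factory := sainformers.NewSharedInformerFactory(saClient, 30*time.Second)
	podScales := factory.Systemautoscaler().V1beta1().PodScales()
	go podScales.Informer().Run(stopCh)
	cache.WaitForCacheSync(stopCh, podScales.Informer().HasSynced)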
+func NewFilteredPodScaleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SystemautoscalerV1beta1().PodScales(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SystemautoscalerV1beta1().PodScales(namespace).Watch(context.TODO(), options) + }, + }, + &systemautoscalerv1beta1.PodScale{}, + resyncPeriod, + indexers, + ) +} + +func (f *podScaleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodScaleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podScaleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&systemautoscalerv1beta1.PodScale{}, f.defaultInformer) +} + +func (f *podScaleInformer) Lister() v1beta1.PodScaleLister { + return v1beta1.NewPodScaleLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/listers/systemautoscaler/v1beta1/containerscale.go b/pkg/generated/listers/systemautoscaler/v1beta1/containerscale.go deleted file mode 100644 index b670206..0000000 --- a/pkg/generated/listers/systemautoscaler/v1beta1/containerscale.go +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ContainerScaleLister helps list ContainerScales. -// All objects returned here must be treated as read-only. -type ContainerScaleLister interface { - // List lists all ContainerScales in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ContainerScale, err error) - // ContainerScales returns an object that can list and get ContainerScales. - ContainerScales(namespace string) ContainerScaleNamespaceLister - ContainerScaleListerExpansion -} - -// containerScaleLister implements the ContainerScaleLister interface. -type containerScaleLister struct { - indexer cache.Indexer -} - -// NewContainerScaleLister returns a new ContainerScaleLister. -func NewContainerScaleLister(indexer cache.Indexer) ContainerScaleLister { - return &containerScaleLister{indexer: indexer} -} - -// List lists all ContainerScales in the indexer. -func (s *containerScaleLister) List(selector labels.Selector) (ret []*v1beta1.ContainerScale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ContainerScale)) - }) - return ret, err -} - -// ContainerScales returns an object that can list and get ContainerScales. -func (s *containerScaleLister) ContainerScales(namespace string) ContainerScaleNamespaceLister { - return containerScaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ContainerScaleNamespaceLister helps list and get ContainerScales. -// All objects returned here must be treated as read-only. 
-type ContainerScaleNamespaceLister interface { - // List lists all ContainerScales in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ContainerScale, err error) - // Get retrieves the ContainerScale from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ContainerScale, error) - ContainerScaleNamespaceListerExpansion -} - -// containerScaleNamespaceLister implements the ContainerScaleNamespaceLister -// interface. -type containerScaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ContainerScales in the indexer for a given namespace. -func (s containerScaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ContainerScale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ContainerScale)) - }) - return ret, err -} - -// Get retrieves the ContainerScale from the indexer for a given namespace and name. -func (s containerScaleNamespaceLister) Get(name string) (*v1beta1.ContainerScale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("containerscale"), name) - } - return obj.(*v1beta1.ContainerScale), nil -} diff --git a/pkg/generated/listers/systemautoscaler/v1beta1/expansion_generated.go b/pkg/generated/listers/systemautoscaler/v1beta1/expansion_generated.go index 71964b7..4f60bd5 100644 --- a/pkg/generated/listers/systemautoscaler/v1beta1/expansion_generated.go +++ b/pkg/generated/listers/systemautoscaler/v1beta1/expansion_generated.go @@ -2,13 +2,13 @@ package v1beta1 -// ContainerScaleListerExpansion allows custom methods to be added to -// ContainerScaleLister. -type ContainerScaleListerExpansion interface{} +// PodScaleListerExpansion allows custom methods to be added to +// PodScaleLister. +type PodScaleListerExpansion interface{} -// ContainerScaleNamespaceListerExpansion allows custom methods to be added to -// ContainerScaleNamespaceLister. -type ContainerScaleNamespaceListerExpansion interface{} +// PodScaleNamespaceListerExpansion allows custom methods to be added to +// PodScaleNamespaceLister. +type PodScaleNamespaceListerExpansion interface{} // ServiceLevelAgreementListerExpansion allows custom methods to be added to // ServiceLevelAgreementLister. diff --git a/pkg/generated/listers/systemautoscaler/v1beta1/podscale.go b/pkg/generated/listers/systemautoscaler/v1beta1/podscale.go new file mode 100644 index 0000000..6bf8171 --- /dev/null +++ b/pkg/generated/listers/systemautoscaler/v1beta1/podscale.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodScaleLister helps list PodScales. +// All objects returned here must be treated as read-only. +type PodScaleLister interface { + // List lists all PodScales in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.PodScale, err error) + // PodScales returns an object that can list and get PodScales. 
+ PodScales(namespace string) PodScaleNamespaceLister + PodScaleListerExpansion +} + +// podScaleLister implements the PodScaleLister interface. +type podScaleLister struct { + indexer cache.Indexer +} + +// NewPodScaleLister returns a new PodScaleLister. +func NewPodScaleLister(indexer cache.Indexer) PodScaleLister { + return &podScaleLister{indexer: indexer} +} + +// List lists all PodScales in the indexer. +func (s *podScaleLister) List(selector labels.Selector) (ret []*v1beta1.PodScale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodScale)) + }) + return ret, err +} + +// PodScales returns an object that can list and get PodScales. +func (s *podScaleLister) PodScales(namespace string) PodScaleNamespaceLister { + return podScaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodScaleNamespaceLister helps list and get PodScales. +// All objects returned here must be treated as read-only. +type PodScaleNamespaceLister interface { + // List lists all PodScales in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.PodScale, err error) + // Get retrieves the PodScale from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.PodScale, error) + PodScaleNamespaceListerExpansion +} + +// podScaleNamespaceLister implements the PodScaleNamespaceLister +// interface. +type podScaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodScales in the indexer for a given namespace. +func (s podScaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.PodScale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodScale)) + }) + return ret, err +} + +// Get retrieves the PodScale from the indexer for a given namespace and name. 
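A consumption sketch for the generated lister, assuming a synced shared informer factory as sketched earlier; the "default" namespace and "example-podscale" name are placeholders:

	// Read-only access to PodScales served from the informer's local cache.
	lister := factory.Systemautoscaler().V1beta1().PodScales().Lister()
	podScale, err := lister.PodScales("default").Get("example-podscale")
	if err != nil {
		klog.Error("failed to retrieve the podscale: ", err)
		return
	}
	klog.Info("desired resources: ", podScale.Spec.DesiredResources)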
+func (s podScaleNamespaceLister) Get(name string) (*v1beta1.PodScale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("podscale"), name) + } + return obj.(*v1beta1.PodScale), nil +} diff --git a/pkg/informers/informers.go b/pkg/informers/informers.go index 67144ea..73804d1 100644 --- a/pkg/informers/informers.go +++ b/pkg/informers/informers.go @@ -11,7 +11,7 @@ type Informers struct { Pod coreinformers.PodInformer Node coreinformers.NodeInformer Service coreinformers.ServiceInformer - ContainerScale sainformers.ContainerScaleInformer + PodScale sainformers.PodScaleInformer ServiceLevelAgreement sainformers.ServiceLevelAgreementInformer } @@ -20,7 +20,7 @@ func (i *Informers) GetListers() Listers { i.Pod.Lister(), i.Node.Lister(), i.Service.Lister(), - i.ContainerScale.Lister(), + i.PodScale.Lister(), i.ServiceLevelAgreement.Lister(), } } @@ -29,6 +29,6 @@ type Listers struct { corelisters.PodLister corelisters.NodeLister corelisters.ServiceLister - salisters.ContainerScaleLister + salisters.PodScaleLister salisters.ServiceLevelAgreementLister } diff --git a/pkg/metrics-exposer/configure.yaml b/pkg/metrics-exposer/configure.yaml index f0c7157..bbf9e5e 100644 --- a/pkg/metrics-exposer/configure.yaml +++ b/pkg/metrics-exposer/configure.yaml @@ -155,5 +155,5 @@ rules: resources: [ "servicelevelagreements" ] verbs: [ "get", "watch", "list" ] - apiGroups: [ "systemautoscaler.polimi.it" ] - resources: [ "containerscales" ] + resources: [ "podscales" ] verbs: [ "get", "watch", "list" ] diff --git a/pkg/metrics-exposer/main.go b/pkg/metrics-exposer/main.go index 65b89ef..9bd1740 100644 --- a/pkg/metrics-exposer/main.go +++ b/pkg/metrics-exposer/main.go @@ -2,6 +2,9 @@ package main import ( "flag" + "os" + "time" + clientset "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" sainformers "github.com/lterrac/system-autoscaler/pkg/generated/informers/externalversions" informers2 "github.com/lterrac/system-autoscaler/pkg/informers" @@ -12,8 +15,6 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/component-base/logs" "k8s.io/klog/v2" - "os" - "time" "github.com/kubernetes-sigs/custom-metrics-apiserver/pkg/apiserver" basecmd "github.com/kubernetes-sigs/custom-metrics-apiserver/pkg/cmd" @@ -87,7 +88,7 @@ func main() { Pod: coreInformerFactory.Core().V1().Pods(), Node: coreInformerFactory.Core().V1().Nodes(), Service: coreInformerFactory.Core().V1().Services(), - ContainerScale: saInformerFactory.Systemautoscaler().V1beta1().ContainerScales(), + PodScale: saInformerFactory.Systemautoscaler().V1beta1().PodScales(), ServiceLevelAgreement: saInformerFactory.Systemautoscaler().V1beta1().ServiceLevelAgreements(), } @@ -98,16 +99,16 @@ func main() { go informers.Pod.Informer().Run(stopCh) go informers.Node.Informer().Run(stopCh) go informers.Service.Informer().Run(stopCh) - go informers.ContainerScale.Informer().Run(stopCh) + go informers.PodScale.Informer().Run(stopCh) go informers.ServiceLevelAgreement.Informer().Run(stopCh) if ok := cache.WaitForCacheSync( stopCh, informers.Pod.Informer().HasSynced, - informers.ContainerScale.Informer().HasSynced, + informers.PodScale.Informer().HasSynced, informers.Service.Informer().HasSynced, informers.ServiceLevelAgreement.Informer().HasSynced, - ); !ok { + ); !ok { klog.Fatalf("failed to wait for caches to sync") } diff --git a/pkg/metrics-exposer/pkg/provider/sync.go b/pkg/metrics-exposer/pkg/provider/sync.go 
index a94bd87..b3c621c 100644
--- a/pkg/metrics-exposer/pkg/provider/sync.go
+++ b/pkg/metrics-exposer/pkg/provider/sync.go
@@ -27,8 +27,7 @@ func (p *responseTimeMetricsProvider) updateMetrics() {
 	var podMetrics *Metrics
 	var err error
 
-	// TODO: retrieve the all the pods
-	containerScales, err := p.informers.ContainerScale.Lister().List(labels.Everything())
+	podScales, err := p.informers.PodScale.Lister().List(labels.Everything())
 	if err != nil {
 		klog.Error("failed to retrieve to retrieve the container scales")
 		return
@@ -36,60 +35,60 @@
 
 	serviceMetricsMap := make(map[string]map[string][]*Metrics)
 
-	for _, containerScale := range containerScales {
+	for _, podScale := range podScales {
 
-		podNamespace := containerScale.Spec.PodRef.Namespace
-		podName := containerScale.Spec.PodRef.Name
+		namespace := podScale.Spec.Namespace
+		podName := podScale.Spec.Pod
 
-		serviceNamespace := containerScale.Spec.ServiceRef.Namespace
-		serviceName := containerScale.Spec.ServiceRef.Name
+		serviceName := podScale.Spec.Service
 
-		pod, err := p.informers.Pod.Lister().Pods(podNamespace).Get(podName)
+		pod, err := p.informers.Pod.Lister().Pods(namespace).Get(podName)
 		if err != nil {
-			klog.Errorf("failed to retrieve the pod with name %s and namespace %s", podName, podNamespace)
+			klog.Errorf("failed to retrieve the pod with name %s and namespace %s", podName, namespace)
 			continue
 		}
 
 		podMetrics, err = p.PodMetrics(pod)
 		if err != nil {
-			klog.Errorf("failed to retrieve metrics for pod with name %s and namespace %s", podName, podNamespace)
+			klog.Errorf("failed to retrieve the metrics for pod with name %s and namespace %s", podName, namespace)
 			continue
 		}
 
-		err = p.updatePodMetric(podName, podNamespace, metrics.ResponseTime, *podMetrics.ResponseTime)
+		err = p.updatePodMetric(podName, namespace, metrics.ResponseTime, *podMetrics.ResponseTime)
 		if err != nil {
-			klog.Errorf("error while updating response time for pod with name %s and namespace %s", podName, podNamespace)
+			klog.Errorf("error while updating response time for pod with name %s and namespace %s", podName, namespace)
 			continue
 		}
 
-		err = p.updatePodMetric(podName, podNamespace, metrics.RequestCount, *podMetrics.RequestCount)
+		err = p.updatePodMetric(podName, namespace, metrics.RequestCount, *podMetrics.RequestCount)
 		if err != nil {
-			klog.Errorf("error while updating request count for pod with name %s and namespace %s", podName, podNamespace)
+			klog.Errorf("error while updating request count for pod with name %s and namespace %s", podName, namespace)
 			continue
 		}
 
-		err = p.updatePodMetric(podName, podNamespace, metrics.Throughput, *podMetrics.Throughput)
+		err = p.updatePodMetric(podName, namespace, metrics.Throughput, *podMetrics.Throughput)
 		if err != nil {
-			klog.Errorf("error while updating throughput for pod with name %s and namespace %s", podName, podNamespace)
+			klog.Errorf("error while updating throughput for pod with name %s and namespace %s", podName, namespace)
 			continue
 		}
 
-		if _, ok := serviceMetricsMap[serviceNamespace]; !ok {
-			serviceMetricsMap[serviceNamespace] = make(map[string][]*Metrics)
+		if _, ok := serviceMetricsMap[namespace]; !ok {
+			serviceMetricsMap[namespace] = make(map[string][]*Metrics)
 		}
 
 		// group metrics by service
-		serviceMetrics, ok := serviceMetricsMap[serviceNamespace][serviceName]
+		serviceMetrics, ok := serviceMetricsMap[namespace][serviceName]
 		if !ok {
 			serviceMetrics = make([]*Metrics, 0)
 		}
 
-		serviceMetricsMap[serviceNamespace][serviceName] = append(serviceMetrics, podMetrics)
+		serviceMetricsMap[namespace][serviceName] = append(serviceMetrics, podMetrics)
+
 	}
 
 	for namespace, nestedMap := range serviceMetricsMap {
diff --git a/pkg/pod-autoscaler/main.go b/pkg/pod-autoscaler/main.go
index 55367cc..769f2cc 100644
--- a/pkg/pod-autoscaler/main.go
+++ b/pkg/pod-autoscaler/main.go
@@ -8,10 +8,10 @@ import (
 	informers2 "github.com/lterrac/system-autoscaler/pkg/informers"
 
-	"github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/types"
 	sainformers "github.com/lterrac/system-autoscaler/pkg/generated/informers/externalversions"
 	cm "github.com/lterrac/system-autoscaler/pkg/pod-autoscaler/pkg/contention-manager"
 	resupd "github.com/lterrac/system-autoscaler/pkg/pod-autoscaler/pkg/pod-resource-updater"
+	"github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/types"
 	metricsclient "k8s.io/metrics/pkg/client/custom_metrics"
 
 	coreinformers "k8s.io/client-go/informers"
@@ -72,7 +72,7 @@ func main() {
 		Pod:                   coreInformerFactory.Core().V1().Pods(),
 		Node:                  coreInformerFactory.Core().V1().Nodes(),
 		Service:               coreInformerFactory.Core().V1().Services(),
-		ContainerScale:        saInformerFactory.Systemautoscaler().V1beta1().ContainerScales(),
+		PodScale:              saInformerFactory.Systemautoscaler().V1beta1().PodScales(),
 		ServiceLevelAgreement: saInformerFactory.Systemautoscaler().V1beta1().ServiceLevelAgreements(),
 	}
diff --git a/pkg/pod-autoscaler/pkg/contention-manager/controller.go b/pkg/pod-autoscaler/pkg/contention-manager/controller.go
index 8ca4f2e..9fd6699 100644
--- a/pkg/pod-autoscaler/pkg/contention-manager/controller.go
+++ b/pkg/pod-autoscaler/pkg/contention-manager/controller.go
@@ -2,9 +2,10 @@ package contentionmanager
 
 import (
 	"fmt"
-	"github.com/lterrac/system-autoscaler/pkg/informers"
 	"time"
 
+	"github.com/lterrac/system-autoscaler/pkg/informers"
+
 	corev1 "k8s.io/api/core/v1"
 
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -16,25 +17,25 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/klog/v2"
 
-	"github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/types"
 	clientset "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned"
 	samplescheme "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned/scheme"
+	"github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/types"
 )
 
 // AgentName is the controller name used
 // both in logs and labels to identify it
 const AgentName = "contention-manager"
 
-// Controller is responsible of adjusting containerscale partially computed by the recommender
+// Controller is responsible for adjusting the podscales partially computed by the recommender
 // taking into account the actual node capacity
 type Controller struct {
-	kubeClientset            kubernetes.Interface
-	containerScalesClientset clientset.Interface
+	kubeClientset      kubernetes.Interface
+	podScalesClientset clientset.Interface
 
 	listers informers.Listers
 
-	containerScalesSynced cache.InformerSynced
-	nodesSynced           cache.InformerSynced
+	podScalesSynced cache.InformerSynced
+	nodesSynced     cache.InformerSynced
 
 	recorder record.EventRecorder
 
@@ -42,10 +43,10 @@ type Controller struct {
 	in  chan types.NodeScales
 	out chan types.NodeScales
 }
 
-// NewController returns a new ContainerScale controller
+// NewController returns a new PodScale controller
 func NewController(
 	kubeClient kubernetes.Interface,
-	containerScalesClient clientset.Interface,
+	podScalesClient clientset.Interface,
 	informers informers.Informers,
 	in chan types.NodeScales,
 	out chan types.NodeScales) *Controller {
@@ -60,13 +61,13 @@ func NewController(
 	recorder :=
eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: AgentName}) controller := &Controller{ - kubeClientset: kubeClient, - containerScalesClientset: containerScalesClient, + kubeClientset: kubeClient, + podScalesClientset: podScalesClient, listers: informers.GetListers(), - containerScalesSynced: informers.ContainerScale.Informer().HasSynced, - nodesSynced: informers.Node.Informer().HasSynced, + podScalesSynced: informers.PodScale.Informer().HasSynced, + nodesSynced: informers.Node.Informer().HasSynced, recorder: recorder, @@ -89,13 +90,13 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, - c.containerScalesSynced, + c.podScalesSynced, c.nodesSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.Info("Starting contention manager workers") - // Launch two workers to process containerScale resources + // Launch two workers to process podScale resources for i := 0; i < threadiness; i++ { go wait.Until(c.runWorker, time.Second, stopCh) } diff --git a/pkg/pod-autoscaler/pkg/contention-manager/logic.go b/pkg/pod-autoscaler/pkg/contention-manager/logic.go index 58341be..de74320 100644 --- a/pkg/pod-autoscaler/pkg/contention-manager/logic.go +++ b/pkg/pod-autoscaler/pkg/contention-manager/logic.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/types" + "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/types" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,9 +29,9 @@ func proportional(desired, totalDesired, totalAvailable int64) int64 { // ContentionManager embeds the contention resolution logic on a given Node. 
type ContentionManager struct { solverFn - CPUCapacity *resource.Quantity - MemoryCapacity *resource.Quantity - ContainerScales []*v1beta1.ContainerScale + CPUCapacity *resource.Quantity + MemoryCapacity *resource.Quantity + PodScales []*v1beta1.PodScale } // NewContentionManager returns a new ContentionManager instance @@ -80,27 +80,27 @@ func NewContentionManager(n *corev1.Node, ns types.NodeScales, p []corev1.Pod, s } return &ContentionManager{ - solverFn: solver, - CPUCapacity: allocatableCPU, - MemoryCapacity: allocatableMemory, - ContainerScales: ns.ContainerScales, + solverFn: solver, + CPUCapacity: allocatableCPU, + MemoryCapacity: allocatableMemory, + PodScales: ns.PodScales, } } -// Solve resolves the contentions between the containerscales -func (m *ContentionManager) Solve() []*v1beta1.ContainerScale { +// Solve resolves the contentions between the podscales +func (m *ContentionManager) Solve() []*v1beta1.PodScale { desiredCPU := &resource.Quantity{} desiredMemory := &resource.Quantity{} - for _, containerscale := range m.ContainerScales { - desiredCPU.Add(*containerscale.Status.CappedResources.Cpu()) - desiredMemory.Add(*containerscale.Status.CappedResources.Memory()) + for _, podscale := range m.PodScales { + desiredCPU.Add(*podscale.Status.CappedResources.Cpu()) + desiredMemory.Add(*podscale.Status.CappedResources.Memory()) } var actualCPU *resource.Quantity var actualMemory *resource.Quantity - for _, cs := range m.ContainerScales { + for _, cs := range m.PodScales { if desiredCPU.Cmp(*m.CPUCapacity) == 1 { actualCPU = resource.NewMilliQuantity( m.solverFn( @@ -138,16 +138,16 @@ func (m *ContentionManager) Solve() []*v1beta1.ContainerScale { } } - return m.ContainerScales + return m.PodScales } // processNextNode adjust the resources of all the pods scheduled on a node // according to the actual capacity. Resources not tracked by System Autoscaler // are not considered. 
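The arithmetic behind Solve is easiest to see with the figures used in TestSolve further down; a self-contained sketch, where the body of proportional is inferred from its signature and from the test's 50m expectations (the diff itself does not show it):

	package main

	import "fmt"

	// proportional is assumed to split the available capacity pro rata
	// among the podscales that ask for it.
	func proportional(desired, totalDesired, totalAvailable int64) int64 {
		return desired * totalAvailable / totalDesired
	}

	func main() {
		// Two podscales each desire 100m CPU on a node with only 100m
		// allocatable: each one is capped to 50m, which matches the
		// expected ActualResources in the second TestSolve case.
		fmt.Println(proportional(100, 200, 100)) // 50
	}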
-func (c *Controller) processNextNode(containerscalesInfos <-chan types.NodeScales) bool { - for containerscalesInfo := range containerscalesInfos { +func (c *Controller) processNextNode(podscalesInfos <-chan types.NodeScales) bool { + for podscalesInfo := range podscalesInfos { - node, err := c.listers.NodeLister.Get(containerscalesInfo.Node) + node, err := c.listers.NodeLister.Get(podscalesInfo.Node) if err != nil { utilruntime.HandleError(fmt.Errorf("error while getting node: %#v", err)) @@ -166,13 +166,13 @@ func (c *Controller) processNextNode(containerscalesInfos <-chan types.NodeScale return true } - cm := NewContentionManager(node, containerscalesInfo, pods.Items, proportional) + cm := NewContentionManager(node, podscalesInfo, pods.Items, proportional) nodeScale := cm.Solve() - containerscalesInfo.ContainerScales = nodeScale + podscalesInfo.PodScales = nodeScale - c.out <- containerscalesInfo + c.out <- podscalesInfo } return true diff --git a/pkg/pod-autoscaler/pkg/contention-manager/logic_test.go b/pkg/pod-autoscaler/pkg/contention-manager/logic_test.go index 34cf504..967bfe7 100644 --- a/pkg/pod-autoscaler/pkg/contention-manager/logic_test.go +++ b/pkg/pod-autoscaler/pkg/contention-manager/logic_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/types" + "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/types" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -194,21 +194,17 @@ func TestNewContentionManager(t *testing.T) { description: "should not consider pods with QOS classes not equal to guaranteed", nodeScale: types.NodeScales{ Node: nodeName, - ContainerScales: []*v1beta1.ContainerScale{ + PodScales: []*v1beta1.PodScale{ { - Spec: v1beta1.ContainerScaleSpec{ - PodRef: v1beta1.PodRef{ - Name: firstName, - Namespace: firstNamespace, - }, + Spec: v1beta1.PodScaleSpec{ + Namespace: firstNamespace, + Pod: firstName, }, }, { - Spec: v1beta1.ContainerScaleSpec{ - PodRef: v1beta1.PodRef{ - Name: secondName, - Namespace: secondNamespace, - }, + Spec: v1beta1.PodScaleSpec{ + Namespace: secondNamespace, + Pod: secondName, }, }, }, @@ -291,8 +287,8 @@ func TestSolve(t *testing.T) { testcases := []struct { description string ContentionManager - expected []*v1beta1.ContainerScale - asserts func(*testing.T, []*v1beta1.ContainerScale, []*v1beta1.ContainerScale) + expected []*v1beta1.PodScale + asserts func(*testing.T, []*v1beta1.PodScale, []*v1beta1.PodScale) }{ { description: "should get the desired capped resources", @@ -300,19 +296,19 @@ func TestSolve(t *testing.T) { solverFn: proportional, CPUCapacity: resource.NewScaledQuantity(100, resource.Milli), MemoryCapacity: resource.NewScaledQuantity(100, resource.Mega), - ContainerScales: []*v1beta1.ContainerScale{ + PodScales: []*v1beta1.PodScale{ { ObjectMeta: metav1.ObjectMeta{ Name: "", Namespace: "", }, - Spec: v1beta1.ContainerScaleSpec{ + Spec: v1beta1.PodScaleSpec{ DesiredResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(50, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(50, resource.Mega), }, }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(50, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(50, resource.Mega), @@ -325,19 +321,19 @@ func TestSolve(t *testing.T) { }, }, }, - 
expected: []*v1beta1.ContainerScale{ + expected: []*v1beta1.PodScale{ { ObjectMeta: metav1.ObjectMeta{ Name: "", Namespace: "", }, - Spec: v1beta1.ContainerScaleSpec{ + Spec: v1beta1.PodScaleSpec{ DesiredResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(50, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(50, resource.Mega), }, }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(50, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(50, resource.Mega), @@ -349,7 +345,7 @@ func TestSolve(t *testing.T) { }, }, }, - asserts: func(t *testing.T, expected []*v1beta1.ContainerScale, actual []*v1beta1.ContainerScale) { + asserts: func(t *testing.T, expected []*v1beta1.PodScale, actual []*v1beta1.PodScale) { for i := range expected { require.Equal(t, 0, expected[i].Status.ActualResources.Cpu().Cmp(*actual[i].Status.ActualResources.Cpu())) require.Equal(t, 0, expected[i].Status.ActualResources.Memory().Cmp(*actual[i].Status.ActualResources.Memory())) @@ -362,19 +358,19 @@ func TestSolve(t *testing.T) { solverFn: proportional, CPUCapacity: resource.NewScaledQuantity(100, resource.Milli), MemoryCapacity: resource.NewScaledQuantity(100, resource.Mega), - ContainerScales: []*v1beta1.ContainerScale{ + PodScales: []*v1beta1.PodScale{ { ObjectMeta: metav1.ObjectMeta{ Name: "", Namespace: "", }, - Spec: v1beta1.ContainerScaleSpec{ + Spec: v1beta1.PodScaleSpec{ DesiredResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(100, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(100, resource.Mega), }, }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(100, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(100, resource.Mega), @@ -390,13 +386,13 @@ func TestSolve(t *testing.T) { Name: "", Namespace: "", }, - Spec: v1beta1.ContainerScaleSpec{ + Spec: v1beta1.PodScaleSpec{ DesiredResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(100, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(100, resource.Mega), }, }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(100, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(100, resource.Mega), @@ -409,19 +405,19 @@ func TestSolve(t *testing.T) { }, }, }, - expected: []*v1beta1.ContainerScale{ + expected: []*v1beta1.PodScale{ { ObjectMeta: metav1.ObjectMeta{ Name: "", Namespace: "", }, - Spec: v1beta1.ContainerScaleSpec{ + Spec: v1beta1.PodScaleSpec{ DesiredResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(100, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(100, resource.Mega), }, }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(50, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(50, resource.Mega), @@ -437,13 +433,13 @@ func TestSolve(t *testing.T) { Name: "", Namespace: "", }, - Spec: v1beta1.ContainerScaleSpec{ + Spec: v1beta1.PodScaleSpec{ DesiredResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(100, resource.Milli), corev1.ResourceMemory: 
*resource.NewScaledQuantity(100, resource.Mega), }, }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: corev1.ResourceList{ corev1.ResourceCPU: *resource.NewScaledQuantity(50, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity(50, resource.Mega), @@ -455,7 +451,7 @@ func TestSolve(t *testing.T) { }, }, }, - asserts: func(t *testing.T, expected []*v1beta1.ContainerScale, actual []*v1beta1.ContainerScale) { + asserts: func(t *testing.T, expected []*v1beta1.PodScale, actual []*v1beta1.PodScale) { for i := range expected { require.Equal(t, 0, expected[i].Status.ActualResources.Cpu().Cmp(*actual[i].Status.ActualResources.Cpu())) require.Equal(t, 0, expected[i].Status.ActualResources.Memory().Cmp(*actual[i].Status.ActualResources.Memory())) @@ -465,12 +461,12 @@ func TestSolve(t *testing.T) { } for _, tt := range testcases { t.Run(tt.description, func(t *testing.T) { - containerscales := tt.ContentionManager.Solve() + podscales := tt.ContentionManager.Solve() totalCPU := resource.Quantity{} totalMemory := resource.Quantity{} - for _, p := range containerscales { + for _, p := range podscales { totalCPU.Add(*p.Status.ActualResources.Cpu()) totalMemory.Add(*p.Status.ActualResources.Memory()) } @@ -478,7 +474,7 @@ func TestSolve(t *testing.T) { require.GreaterOrEqual(t, tt.ContentionManager.CPUCapacity.MilliValue(), totalCPU.MilliValue()) require.GreaterOrEqual(t, tt.ContentionManager.MemoryCapacity.MilliValue(), totalMemory.MilliValue()) - tt.asserts(t, tt.expected, containerscales) + tt.asserts(t, tt.expected, podscales) }) } } diff --git a/pkg/pod-autoscaler/pkg/e2e/controller_suite_test.go b/pkg/pod-autoscaler/pkg/e2e/controller_suite_test.go index c85d2d2..0254dd3 100644 --- a/pkg/pod-autoscaler/pkg/e2e/controller_suite_test.go +++ b/pkg/pod-autoscaler/pkg/e2e/controller_suite_test.go @@ -11,10 +11,10 @@ import ( "github.com/lterrac/system-autoscaler/pkg/informers" "k8s.io/apimachinery/pkg/labels" - "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/types" sainformers "github.com/lterrac/system-autoscaler/pkg/generated/informers/externalversions" resupd "github.com/lterrac/system-autoscaler/pkg/pod-autoscaler/pkg/pod-resource-updater" "github.com/lterrac/system-autoscaler/pkg/pod-autoscaler/pkg/recommender" + "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/types" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -83,7 +83,7 @@ var _ = BeforeSuite(func(done Done) { Pod: coreInformerFactory.Core().V1().Pods(), Node: coreInformerFactory.Core().V1().Nodes(), Service: coreInformerFactory.Core().V1().Services(), - ContainerScale: crdInformerFactory.Systemautoscaler().V1beta1().ContainerScales(), + PodScale: crdInformerFactory.Systemautoscaler().V1beta1().PodScales(), ServiceLevelAgreement: crdInformerFactory.Systemautoscaler().V1beta1().ServiceLevelAgreements(), } @@ -225,16 +225,16 @@ func newPod(name string, container string, podLabels map[string]string) *corev1. 
} } -func newContainerScale(sla *sa.ServiceLevelAgreement, pod *corev1.Pod, selectorLabels map[string]string) *sa.ContainerScale { +func newPodScale(sla *sa.ServiceLevelAgreement, pod *corev1.Pod, selectorLabels map[string]string) *sa.PodScale { podLabels := make(labels.Set) for k, v := range selectorLabels { podLabels[k] = v } podLabels["system.autoscaler/node"] = pod.Spec.NodeName - return &sa.ContainerScale{ + return &sa.PodScale{ TypeMeta: metav1.TypeMeta{ APIVersion: "sa.polimi.it/v1beta1", - Kind: "ContainerScale", + Kind: "PodScale", }, ObjectMeta: metav1.ObjectMeta{ Name: "pod-" + pod.GetName(), @@ -249,19 +249,14 @@ func newContainerScale(sla *sa.ServiceLevelAgreement, pod *corev1.Pod, selectorL }, }, }, - Spec: sa.ContainerScaleSpec{ - SLARef: sa.SLARef{ - Name: sla.GetName(), - Namespace: sla.GetNamespace(), - }, - PodRef: sa.PodRef{ - Name: pod.GetName(), - Namespace: pod.GetNamespace(), - }, + Spec: sa.PodScaleSpec{ + Namespace: sla.GetNamespace(), + SLA: sla.GetName(), + Pod: pod.GetName(), Container: pod.Spec.Containers[0].Name, DesiredResources: sla.Spec.DefaultResources, }, - Status: sa.ContainerScaleStatus{ + Status: sa.PodScaleStatus{ ActualResources: sla.Spec.DefaultResources, CappedResources: sla.Spec.DefaultResources, }, diff --git a/pkg/pod-autoscaler/pkg/e2e/recommender_ginkgo_test.go b/pkg/pod-autoscaler/pkg/e2e/recommender_ginkgo_test.go index 5ad8b47..b84f212 100644 --- a/pkg/pod-autoscaler/pkg/e2e/recommender_ginkgo_test.go +++ b/pkg/pod-autoscaler/pkg/e2e/recommender_ginkgo_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "context" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,8 +45,8 @@ var _ = Describe("Recommender controller", func() { return pod.Spec.NodeName != "" }, timeout, interval).Should(BeTrue()) - containerScale := newContainerScale(sla, pod, labels) - containerScale, err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Create(ctx, containerScale, metav1.CreateOptions{}) + podScale := newPodScale(sla, pod, labels) + podScale, err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Create(ctx, podScale, metav1.CreateOptions{}) Expect(err).ShouldNot(HaveOccurred()) Eventually(func() bool { @@ -54,12 +55,12 @@ var _ = Describe("Recommender controller", func() { Expect(err).ShouldNot(HaveOccurred()) nodeScale := <-recommenderOut return nodeScale.Node == pod.Spec.NodeName && - len(nodeScale.ContainerScales) == 1 && - nodeScale.ContainerScales[0].Namespace == containerScale.Namespace && - nodeScale.ContainerScales[0].Name == containerScale.Name + len(nodeScale.PodScales) == 1 && + nodeScale.PodScales[0].Namespace == podScale.Namespace && + nodeScale.PodScales[0].Name == podScale.Name }, timeout, interval).Should(BeTrue()) - err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Delete(ctx, containerScale.Name, metav1.DeleteOptions{}) + err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Delete(ctx, podScale.Name, metav1.DeleteOptions{}) Expect(err).ShouldNot(HaveOccurred()) err = saClient.SystemautoscalerV1beta1().ServiceLevelAgreements(namespace).Delete(ctx, sla.Name, metav1.DeleteOptions{}) @@ -105,8 +106,8 @@ var _ = Describe("Recommender controller", func() { return pod1.Spec.NodeName != "" }, timeout, interval).Should(BeTrue()) - containerScale1 := newContainerScale(sla, pod1, labels) - containerScale1, err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Create(ctx, containerScale1, metav1.CreateOptions{}) + podScale1 := 
newPodScale(sla, pod1, labels) + podScale1, err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Create(ctx, podScale1, metav1.CreateOptions{}) Expect(err).ShouldNot(HaveOccurred()) Eventually(func() bool { @@ -115,9 +116,9 @@ var _ = Describe("Recommender controller", func() { Expect(err).ShouldNot(HaveOccurred()) nodeScale := <-recommenderOut return nodeScale.Node == pod1.Spec.NodeName && - len(nodeScale.ContainerScales) == 1 && - nodeScale.ContainerScales[0].Namespace == containerScale1.Namespace && - nodeScale.ContainerScales[0].Name == containerScale1.Name + len(nodeScale.PodScales) == 1 && + nodeScale.PodScales[0].Namespace == podScale1.Namespace && + nodeScale.PodScales[0].Name == podScale1.Name }, timeout, interval).Should(BeTrue()) pod2 := newPod("replica2", containerName, podLabels) @@ -130,11 +131,11 @@ var _ = Describe("Recommender controller", func() { return pod2.Spec.NodeName != "" }, timeout, interval).Should(BeTrue()) - containerScale2 := newContainerScale(sla, pod2, labels) - containerScale2, err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Create(ctx, containerScale2, metav1.CreateOptions{}) + podScale2 := newPodScale(sla, pod2, labels) + podScale2, err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Create(ctx, podScale2, metav1.CreateOptions{}) Expect(err).ShouldNot(HaveOccurred()) - err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Delete(ctx, containerScale1.Name, metav1.DeleteOptions{}) + err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Delete(ctx, podScale1.Name, metav1.DeleteOptions{}) Expect(err).ShouldNot(HaveOccurred()) err = kubeClient.CoreV1().Pods(namespace).Delete(ctx, pod1.Name, metav1.DeleteOptions{}) @@ -146,9 +147,9 @@ var _ = Describe("Recommender controller", func() { Expect(err).ShouldNot(HaveOccurred()) nodeScale := <-recommenderOut return nodeScale.Node == pod2.Spec.NodeName && - len(nodeScale.ContainerScales) == 1 && - nodeScale.ContainerScales[0].Namespace == containerScale2.Namespace && - nodeScale.ContainerScales[0].Name == containerScale2.Name + len(nodeScale.PodScales) == 1 && + nodeScale.PodScales[0].Namespace == podScale2.Namespace && + nodeScale.PodScales[0].Name == podScale2.Name }, timeout, interval).Should(BeTrue()) // For x control periods, pod 1 does not appear in the channel @@ -160,16 +161,16 @@ var _ = Describe("Recommender controller", func() { for i := 0; i < x; i++ { nodeScale := <-recommenderOut if nodeScale.Node == pod1.Spec.NodeName && - len(nodeScale.ContainerScales) == 1 && - nodeScale.ContainerScales[0].Namespace == containerScale1.Namespace && - nodeScale.ContainerScales[0].Name == containerScale1.Name { + len(nodeScale.PodScales) == 1 && + nodeScale.PodScales[0].Namespace == podScale1.Namespace && + nodeScale.PodScales[0].Name == podScale1.Name { return false } } return true }, timeout, interval).Should(BeTrue()) - err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Delete(ctx, containerScale2.Name, metav1.DeleteOptions{}) + err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Delete(ctx, podScale2.Name, metav1.DeleteOptions{}) Expect(err).ShouldNot(HaveOccurred()) err = kubeClient.CoreV1().Pods(namespace).Delete(ctx, pod2.Name, metav1.DeleteOptions{}) diff --git a/pkg/pod-autoscaler/pkg/e2e/resource_updater_ginkgo_test.go b/pkg/pod-autoscaler/pkg/e2e/resource_updater_ginkgo_test.go index 6dd8ad2..ae35d89 100644 --- a/pkg/pod-autoscaler/pkg/e2e/resource_updater_ginkgo_test.go +++ 
b/pkg/pod-autoscaler/pkg/e2e/resource_updater_ginkgo_test.go @@ -2,8 +2,9 @@ package e2e_test import ( "context" + sa "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/types" + "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/types" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -48,21 +49,21 @@ var _ = Describe("Resource Updater controller", func() { return pod.Spec.NodeName != "" }, timeout, interval).Should(BeTrue()) - containerScale := newContainerScale(sla, pod, labels) - containerScale, err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Create(ctx, containerScale, metav1.CreateOptions{}) + podScale := newPodScale(sla, pod, labels) + podScale, err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Create(ctx, podScale, metav1.CreateOptions{}) Expect(err).ShouldNot(HaveOccurred()) - updatedContainerScale := containerScale.DeepCopy() - updatedContainerScale.Status.ActualResources[corev1.ResourceCPU] = *resource.NewScaledQuantity(500, resource.Milli) - updatedContainerScale.Status.ActualResources[corev1.ResourceMemory] = *resource.NewScaledQuantity(500, resource.Mega) - klog.Info(updatedContainerScale) + updatedPodScale := podScale.DeepCopy() + updatedPodScale.Status.ActualResources[corev1.ResourceCPU] = *resource.NewScaledQuantity(500, resource.Milli) + updatedPodScale.Status.ActualResources[corev1.ResourceMemory] = *resource.NewScaledQuantity(500, resource.Mega) + klog.Info(updatedPodScale) - containerScales := make([]*sa.ContainerScale, 0) - containerScales = append(containerScales, updatedContainerScale) + podScales := make([]*sa.PodScale, 0) + podScales = append(podScales, updatedPodScale) nodeScale := types.NodeScales{ - Node: pod.Spec.NodeName, - ContainerScales: containerScales, + Node: pod.Spec.NodeName, + PodScales: podScales, } contentionManagerOut <- nodeScale @@ -79,11 +80,11 @@ var _ = Describe("Resource Updater controller", func() { return requestCpu == limitCpu && requestMem == limitMem && - requestCpu == updatedContainerScale.Status.ActualResources.Cpu().ScaledValue(resource.Milli) && - requestMem == updatedContainerScale.Status.ActualResources.Memory().ScaledValue(resource.Mega) + requestCpu == updatedPodScale.Status.ActualResources.Cpu().ScaledValue(resource.Milli) && + requestMem == updatedPodScale.Status.ActualResources.Memory().ScaledValue(resource.Mega) }, timeout, interval).Should(BeTrue()) - err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Delete(ctx, containerScale.Name, metav1.DeleteOptions{}) + err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Delete(ctx, podScale.Name, metav1.DeleteOptions{}) Expect(err).ShouldNot(HaveOccurred()) err = saClient.SystemautoscalerV1beta1().ServiceLevelAgreements(namespace).Delete(ctx, sla.Name, metav1.DeleteOptions{}) diff --git a/pkg/pod-autoscaler/pkg/pod-resource-updater/controller.go b/pkg/pod-autoscaler/pkg/pod-resource-updater/controller.go index 3c202a5..c47c083 100644 --- a/pkg/pod-autoscaler/pkg/pod-resource-updater/controller.go +++ b/pkg/pod-autoscaler/pkg/pod-resource-updater/controller.go @@ -11,9 +11,9 @@ import ( "time" "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/types" - containerscalesclientset "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" + podscalesclientset 
"github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" samplescheme "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned/scheme" + "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/types" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -34,16 +34,16 @@ const controllerAgentName = "pod-resource-updater" // For pod metrics it retrieves, it computes the new resources to assign to the pod. type Controller struct { - // containerScalesClientset is a clientset for our own API group - containerScalesClientset containerscalesclientset.Interface + // podScalesClientset is a clientset for our own API group + podScalesClientset podscalesclientset.Interface // kubernetesCLientset is the client-go of kubernetes kubernetesClientset kubernetes.Interface listers informers.Listers - containerScalesSynced cache.InformerSynced - podSynced cache.InformerSynced + podScalesSynced cache.InformerSynced + podSynced cache.InformerSynced // recorder is an event recorder for recording Event resources to the // Kubernetes API. @@ -57,7 +57,7 @@ type Controller struct { // NewController returns a new sample controller func NewController(kubernetesClientset *kubernetes.Clientset, - containerScalesClientset containerscalesclientset.Interface, + podScalesClientset podscalesclientset.Interface, informers informers.Informers, in chan types.NodeScales) *Controller { @@ -70,7 +70,7 @@ func NewController(kubernetesClientset *kubernetes.Clientset, eventBroadcaster.StartStructuredLogging(0) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) - fileLogger, err := logger.NewFileLogger("/var/containerscale.json") + fileLogger, err := logger.NewFileLogger("/var/podscale.json") //TODO: remove as soon as possible if err != nil { @@ -79,14 +79,14 @@ func NewController(kubernetesClientset *kubernetes.Clientset, // Instantiate the Controller controller := &Controller{ - containerScalesClientset: containerScalesClientset, - kubernetesClientset: kubernetesClientset, - recorder: recorder, - listers: informers.GetListers(), - containerScalesSynced: informers.ContainerScale.Informer().HasSynced, - podSynced: informers.Pod.Informer().HasSynced, - log: fileLogger, - in: in, + podScalesClientset: podScalesClientset, + kubernetesClientset: kubernetesClientset, + recorder: recorder, + listers: informers.GetListers(), + podScalesSynced: informers.PodScale.Informer().HasSynced, + podSynced: informers.Pod.Informer().HasSynced, + log: fileLogger, + in: in, } return controller @@ -104,13 +104,13 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, - c.containerScalesSynced, + c.podScalesSynced, c.podSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.Info("Starting pod resource updater workers") - // Launch the workers to process containerScale resources and recommend new pod scales + // Launch the workers to process podScale resources and recommend new pod scales for i := 0; i < threadiness; i++ { go wait.Until(c.runNodeScaleWorker, time.Second, stopCh) } @@ -126,56 +126,56 @@ func (c *Controller) Shutdown() { func (c *Controller) runNodeScaleWorker() { for nodeScale := range c.in { klog.Info("Processing ", nodeScale) - for _, containerScale := range 
nodeScale.ContainerScales {
+		for _, podScale := range nodeScale.PodScales {
 
-			pod, err := c.listers.Pods(containerScale.Spec.PodRef.Namespace).Get(containerScale.Spec.PodRef.Name)
+			pod, err := c.listers.Pods(podScale.Spec.Namespace).Get(podScale.Spec.Pod)
 			if err != nil {
 				klog.Error("Error retrieving the pod: ", err)
 				return
 			}
 
-			newPod, err := syncPod(pod, *containerScale)
+			newPod, err := syncPod(pod, *podScale)
 			if err != nil {
 				klog.Error("Error syncing the pod: ", err)
 				return
 			}
 
 			// try both updates in dry-run first and then actuate them consistently
-			updatedPod, updatedContainerScale, err := c.AtomicResourceUpdate(newPod, containerScale)
+			updatedPod, updatedPodScale, err := c.AtomicResourceUpdate(newPod, podScale)
 			if err != nil {
-				klog.Error("Error while updating pod and containerscale: ", err)
+				klog.Error("Error while updating pod and podscale: ", err)
 				//TODO: We are using this channel as a workqueue. Why don't use one?
 				c.in <- nodeScale
 				return
 			}
 
 			//TODO: handle error
-			_ = c.log.Log(updatedContainerScale)
+			_ = c.log.Log(updatedPodScale)
 
-			klog.Info("Desired resources:", updatedContainerScale.Spec.DesiredResources)
-			klog.Info("Actual resources:", updatedContainerScale.Status.ActualResources)
+			klog.Info("Desired resources:", updatedPodScale.Spec.DesiredResources)
+			klog.Info("Actual resources:", updatedPodScale.Status.ActualResources)
 			klog.Info("Pod resources:", updatedPod.Spec.Containers[0].Resources)
 		}
 	}
 }
 
-// AtomicResourceUpdate updates a Pod and its Containerscale consistently in order to keep synchronized the two resources. Before performing the real update
+// AtomicResourceUpdate updates a Pod and its PodScale consistently in order to keep the two resources synchronized. Before performing the real update
 // it runs a request in dry-run and it checks for any potential error
-func (c *Controller) AtomicResourceUpdate(pod *corev1.Pod, containerScale *v1beta1.ContainerScale) (*corev1.Pod, *v1beta1.ContainerScale, error) {
+func (c *Controller) AtomicResourceUpdate(pod *corev1.Pod, podScale *v1beta1.PodScale) (*corev1.Pod, *v1beta1.PodScale, error) {
 	var err error
 
-	_, _, err = c.updateResources(pod, containerScale, true)
+	_, _, err = c.updateResources(pod, podScale, true)
 	if err != nil {
 		klog.Error("Error while performing dry-run resource update: ", err)
 		return nil, nil, err
 	}
 
-	return c.updateResources(pod, containerScale, false)
+	return c.updateResources(pod, podScale, false)
 }
 
-// updateResources performs Pod and Containerscale resource update in dry-run mode or not whether the corresponding flag is passed
-func (c *Controller) updateResources(pod *corev1.Pod, containerScale *v1beta1.ContainerScale, dryRun bool) (newPod *corev1.Pod, newContainerScale *v1beta1.ContainerScale, err error) {
+// updateResources performs the Pod and PodScale resource update, running it in dry-run mode when the corresponding flag is set
+func (c *Controller) updateResources(pod *corev1.Pod, podScale *v1beta1.PodScale, dryRun bool) (newPod *corev1.Pod, newPodScale *v1beta1.PodScale, err error) {
 
 	opts := &metav1.UpdateOptions{}
 
@@ -183,19 +183,19 @@ func (c *Controller) updateResources(pod *corev1.Pod, containerScale *v1beta1.Co
 		opts.DryRun = []string{metav1.DryRunAll}
 	}
 
-	newPod, err = c.kubernetesClientset.CoreV1().Pods(containerScale.Spec.PodRef.Namespace).Update(context.TODO(), pod, *opts)
+	newPod, err = c.kubernetesClientset.CoreV1().Pods(podScale.Spec.Namespace).Update(context.TODO(), pod, *opts)
 	if err != nil {
 		klog.Error("Error updating the pod: ", err)
 		return nil, nil, err
 	}
 
-
newContainerScale, err = c.containerScalesClientset.SystemautoscalerV1beta1().ContainerScales(containerScale.Namespace).Update(context.TODO(), containerScale, *opts) + newPodScale, err = c.podScalesClientset.SystemautoscalerV1beta1().PodScales(podScale.Namespace).Update(context.TODO(), podScale, *opts) if err != nil { klog.Error("Error updating the pod scale: ", err) return nil, nil, err } - return newPod, newContainerScale, nil + return newPod, newPodScale, nil } diff --git a/pkg/pod-autoscaler/pkg/pod-resource-updater/utils.go b/pkg/pod-autoscaler/pkg/pod-resource-updater/utils.go index f3d3a91..8e93b86 100644 --- a/pkg/pod-autoscaler/pkg/pod-resource-updater/utils.go +++ b/pkg/pod-autoscaler/pkg/pod-resource-updater/utils.go @@ -2,12 +2,13 @@ package resourceupdater import ( "fmt" + "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" ) -func syncPod(pod *v1.Pod, containerScale v1beta1.ContainerScale) (*v1.Pod, error) { +func syncPod(pod *v1.Pod, podScale v1beta1.PodScale) (*v1.Pod, error) { newPod := pod.DeepCopy() @@ -15,18 +16,18 @@ func syncPod(pod *v1.Pod, containerScale v1beta1.ContainerScale) (*v1.Pod, error return nil, fmt.Errorf("the pod has %v but it should have 'guaranteed' QOS class", newPod.Status.QOSClass) } - if containerScale.Status.ActualResources.Cpu().MilliValue() <= 0 { - return nil, fmt.Errorf("pod scale must have positive cpu resource value, actual value: %v", containerScale.Status.ActualResources.Cpu().ScaledValue(resource.Milli)) + if podScale.Status.ActualResources.Cpu().MilliValue() <= 0 { + return nil, fmt.Errorf("pod scale must have positive cpu resource value, actual value: %v", podScale.Status.ActualResources.Cpu().ScaledValue(resource.Milli)) } - if containerScale.Status.ActualResources.Memory().MilliValue() <= 0 { - return nil, fmt.Errorf("pod scale must have positive memory resource value, actual value: %v", containerScale.Status.ActualResources.Memory().ScaledValue(resource.Mega)) + if podScale.Status.ActualResources.Memory().MilliValue() <= 0 { + return nil, fmt.Errorf("pod scale must have positive memory resource value, actual value: %v", podScale.Status.ActualResources.Memory().ScaledValue(resource.Mega)) } for i, container := range newPod.Spec.Containers { - if container.Name == containerScale.Spec.Container { - container.Resources.Requests = containerScale.Status.ActualResources - container.Resources.Limits = containerScale.Status.ActualResources + if container.Name == podScale.Spec.Container { + container.Resources.Requests = podScale.Status.ActualResources + container.Resources.Limits = podScale.Status.ActualResources newPod.Spec.Containers[i] = container break } diff --git a/pkg/pod-autoscaler/pkg/pod-resource-updater/utils_test.go b/pkg/pod-autoscaler/pkg/pod-resource-updater/utils_test.go index 3c43fcd..771a7cf 100644 --- a/pkg/pod-autoscaler/pkg/pod-resource-updater/utils_test.go +++ b/pkg/pod-autoscaler/pkg/pod-resource-updater/utils_test.go @@ -16,74 +16,74 @@ func TestSyncPod(t *testing.T) { // TODO: The test case should be modified in future in order to handle more granularity. // Instead of pod resource values, we should insert cpu and mem values for each container. 
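Before the table-driven cases, a short recap of what syncPod in utils.go above guarantees, with a hedged usage fragment (pod and podScale stand for any Guaranteed-QOS pod and its PodScale):

	newPod, err := syncPod(pod, podScale)
	if err != nil {
		// rejected: non-Guaranteed QOS class or non-positive cpu/memory values
		klog.Error("refusing the resource update: ", err)
		return
	}
	// For the container named in podScale.Spec.Container, requests and limits
	// are both set to podScale.Status.ActualResources, so the pod keeps its
	// Guaranteed QOS class after the update.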
testcases := []struct { - description string - podQOS v1.PodQOSClass - podNumberOfContainers int64 - podCPUValue int64 - podMemValue int64 - containerScaleCPUActualValue int64 - containerScaleMemActualValue int64 - success bool + description string + podQOS v1.PodQOSClass + podNumberOfContainers int64 + podCPUValue int64 + podMemValue int64 + podScaleCPUActualValue int64 + podScaleMemActualValue int64 + success bool }{ { - description: "successfully increased the resources of a pod", - podQOS: v1.PodQOSGuaranteed, - podNumberOfContainers: 1, - podCPUValue: 100, - podMemValue: 100, - containerScaleCPUActualValue: 1000, - containerScaleMemActualValue: 1000, - success: true, + description: "successfully increased the resources of a pod", + podQOS: v1.PodQOSGuaranteed, + podNumberOfContainers: 1, + podCPUValue: 100, + podMemValue: 100, + podScaleCPUActualValue: 1000, + podScaleMemActualValue: 1000, + success: true, }, { - description: "successfully decreased the resources of a pod", - podQOS: v1.PodQOSGuaranteed, - podNumberOfContainers: 1, - podCPUValue: 100, - podMemValue: 100, - containerScaleCPUActualValue: 1000, - containerScaleMemActualValue: 1000, - success: true, + description: "successfully decreased the resources of a pod", + podQOS: v1.PodQOSGuaranteed, + podNumberOfContainers: 1, + podCPUValue: 100, + podMemValue: 100, + podScaleCPUActualValue: 1000, + podScaleMemActualValue: 1000, + success: true, }, { - description: "fail to update a pod with negative cpu resource value", - podQOS: v1.PodQOSGuaranteed, - podNumberOfContainers: 1, - podCPUValue: 100, - podMemValue: 100, - containerScaleCPUActualValue: -1, - containerScaleMemActualValue: 1000, - success: false, + description: "fail to update a pod with negative cpu resource value", + podQOS: v1.PodQOSGuaranteed, + podNumberOfContainers: 1, + podCPUValue: 100, + podMemValue: 100, + podScaleCPUActualValue: -1, + podScaleMemActualValue: 1000, + success: false, }, { - description: "fail to update a pod with negative memory resource value", - podQOS: v1.PodQOSGuaranteed, - podNumberOfContainers: 1, - podCPUValue: 100, - podMemValue: 100, - containerScaleCPUActualValue: 1000, - containerScaleMemActualValue: -1, - success: false, + description: "fail to update a pod with negative memory resource value", + podQOS: v1.PodQOSGuaranteed, + podNumberOfContainers: 1, + podCPUValue: 100, + podMemValue: 100, + podScaleCPUActualValue: 1000, + podScaleMemActualValue: -1, + success: false, }, { - description: "fail to update a pod that has BE QOS", - podQOS: v1.PodQOSBestEffort, - podNumberOfContainers: 1, - podCPUValue: 100, - podMemValue: 100, - containerScaleCPUActualValue: 1000, - containerScaleMemActualValue: 1000, - success: false, + description: "fail to update a pod that has BE QOS", + podQOS: v1.PodQOSBestEffort, + podNumberOfContainers: 1, + podCPUValue: 100, + podMemValue: 100, + podScaleCPUActualValue: 1000, + podScaleMemActualValue: 1000, + success: false, }, { - description: "fail to update a pod that has BU QOS", - podQOS: v1.PodQOSBurstable, - podNumberOfContainers: 1, - podCPUValue: 100, - podMemValue: 100, - containerScaleCPUActualValue: 1000, - containerScaleMemActualValue: 1000, - success: false, + description: "fail to update a pod that has BU QOS", + podQOS: v1.PodQOSBurstable, + podNumberOfContainers: 1, + podCPUValue: 100, + podMemValue: 100, + podScaleCPUActualValue: 1000, + podScaleMemActualValue: 1000, + success: false, }, //{ // // TODO: this test should be changed once we are able to update multiple containers @@ -92,8 
+92,8 @@ func TestSyncPod(t *testing.T) { // podNumberOfContainers: 2, // podCPUValue: 100, // podMemValue: 100, - // containerScaleCPUActualValue: 1000, - // containerScaleMemActualValue: 1000, + // podScaleCPUActualValue: 1000, + // podScaleMemActualValue: 1000, // success: false, //}, } @@ -137,40 +137,38 @@ func TestSyncPod(t *testing.T) { }, } // Instantiate the pod scale - containerScale := v1beta1.ContainerScale{ + podScale := v1beta1.PodScale{ TypeMeta: metav1.TypeMeta{ - Kind: "containerscales", + Kind: "podscales", APIVersion: v1beta1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ - Name: "containerscale-name", + Name: "podscale-name", Namespace: "default", }, - Spec: v1beta1.ContainerScaleSpec{ - PodRef: v1beta1.PodRef{ - Name: "pod-name", - Namespace: "default", - }, + Spec: v1beta1.PodScaleSpec{ + Pod: "pod-name", + Namespace: "default", DesiredResources: v1.ResourceList{ - v1.ResourceCPU: *resource.NewScaledQuantity(tt.containerScaleCPUActualValue, resource.Milli), - v1.ResourceMemory: *resource.NewScaledQuantity(tt.containerScaleMemActualValue, resource.Mega), + v1.ResourceCPU: *resource.NewScaledQuantity(tt.podScaleCPUActualValue, resource.Milli), + v1.ResourceMemory: *resource.NewScaledQuantity(tt.podScaleMemActualValue, resource.Mega), }, Container: "container-n-0", }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: v1.ResourceList{ - v1.ResourceCPU: *resource.NewScaledQuantity(tt.containerScaleCPUActualValue, resource.Milli), - v1.ResourceMemory: *resource.NewScaledQuantity(tt.containerScaleMemActualValue, resource.Mega), + v1.ResourceCPU: *resource.NewScaledQuantity(tt.podScaleCPUActualValue, resource.Milli), + v1.ResourceMemory: *resource.NewScaledQuantity(tt.podScaleMemActualValue, resource.Mega), }, }, } - newPod, err := syncPod(&pod, containerScale) + newPod, err := syncPod(&pod, podScale) if tt.success { require.Nil(t, err, "Do not expect error") - require.Equal(t, newPod.Spec.Containers[0].Resources.Limits.Cpu().ScaledValue(resource.Milli), tt.containerScaleCPUActualValue) - require.Equal(t, newPod.Spec.Containers[0].Resources.Requests.Cpu().ScaledValue(resource.Milli), tt.containerScaleCPUActualValue) - require.Equal(t, newPod.Spec.Containers[0].Resources.Limits.Memory().ScaledValue(resource.Mega), tt.containerScaleMemActualValue) - require.Equal(t, newPod.Spec.Containers[0].Resources.Requests.Memory().ScaledValue(resource.Mega), tt.containerScaleMemActualValue) + require.Equal(t, newPod.Spec.Containers[0].Resources.Limits.Cpu().ScaledValue(resource.Milli), tt.podScaleCPUActualValue) + require.Equal(t, newPod.Spec.Containers[0].Resources.Requests.Cpu().ScaledValue(resource.Milli), tt.podScaleCPUActualValue) + require.Equal(t, newPod.Spec.Containers[0].Resources.Limits.Memory().ScaledValue(resource.Mega), tt.podScaleMemActualValue) + require.Equal(t, newPod.Spec.Containers[0].Resources.Requests.Memory().ScaledValue(resource.Mega), tt.podScaleMemActualValue) require.Equal(t, newPod.Status.QOSClass, v1.PodQOSGuaranteed) } else { require.Error(t, err, "expected error") diff --git a/pkg/pod-autoscaler/pkg/recommender/controller.go b/pkg/pod-autoscaler/pkg/recommender/controller.go index 148a5f2..4bc1aeb 100644 --- a/pkg/pod-autoscaler/pkg/recommender/controller.go +++ b/pkg/pod-autoscaler/pkg/recommender/controller.go @@ -9,7 +9,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - 
"github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/types" + "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/types" "github.com/modern-go/concurrent" corev1 "k8s.io/api/core/v1" @@ -22,7 +22,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - containerscalesclientset "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" + podscalesclientset "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" samplescheme "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned/scheme" ) @@ -35,12 +35,12 @@ const responseTime = "response_time" // For pod metrics it retrieves, it computes the new resources to assign to the pod. type Controller struct { - // containerScalesClientset is a clientset for our own API group - containerScalesClientset containerscalesclientset.Interface + // podScalesClientset is a clientset for our own API group + podScalesClientset podscalesclientset.Interface listers informers.Listers - containerScalesSynced cache.InformerSynced + podScalesSynced cache.InformerSynced // kubernetesCLientset is the client-go of kubernetes kubernetesClientset kubernetes.Interface @@ -71,7 +71,7 @@ type Status struct { // NewController returns a new recommender func NewController( kubernetesClientset kubernetes.Interface, - containerScalesClientset containerscalesclientset.Interface, + podScalesClientset podscalesclientset.Interface, metricsClient MetricGetter, informers informers.Informers, out chan types.NodeScales, @@ -93,15 +93,15 @@ func NewController( // Instantiate the Controller controller := &Controller{ - containerScalesClientset: containerScalesClientset, - listers: informers.GetListers(), - containerScalesSynced: informers.ContainerScale.Informer().HasSynced, - kubernetesClientset: kubernetesClientset, - recommendNodeQueue: queue.NewQueue("RecommendQueue"), - status: status, - MetricGetter: metricsClient, - recorder: recorder, - out: out, + podScalesClientset: podScalesClientset, + listers: informers.GetListers(), + podScalesSynced: informers.PodScale.Informer().HasSynced, + kubernetesClientset: kubernetesClientset, + recommendNodeQueue: queue.NewQueue("RecommendQueue"), + status: status, + MetricGetter: metricsClient, + recorder: recorder, + out: out, } klog.Info("Setting up event handlers") @@ -120,12 +120,12 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") - if ok := cache.WaitForCacheSync(stopCh, c.containerScalesSynced); !ok { + if ok := cache.WaitForCacheSync(stopCh, c.podScalesSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.Info("Starting recommender workers") - // Launch the workers to process containerScale resources and recommendContainer new pod scales + // Launch the workers to process podScale resources and recommendContainer new pod scales for i := 0; i < threadiness; i++ { go wait.Until(c.runNodeRecommenderWorker, time.Second, stopCh) } @@ -168,35 +168,35 @@ func (c *Controller) recommendNode(node string) error { // Recommend to all pods in a node new pod scales resources. 
klog.Info("Recommending to node ", node) - newContainerScales := make([]*v1beta1.ContainerScale, 0) + newPodScales := make([]*v1beta1.PodScale, 0) listSelector := labels.Set(map[string]string{"system.autoscaler/node": node}).AsSelector() - containerscales, err := c.listers.ContainerScaleLister.List(listSelector) + podscales, err := c.listers.PodScaleLister.List(listSelector) if err != nil { utilruntime.HandleError(fmt.Errorf("list pod scales failed: %s", err)) return nil } - if len(containerscales) == 0 { - utilruntime.HandleError(fmt.Errorf("no containerscales found on node: %s", node)) + if len(podscales) == 0 { + utilruntime.HandleError(fmt.Errorf("no podscales found on node: %s", node)) return nil } - for _, containerscale := range containerscales { - newContainerScale, err := c.recommendContainer(containerscale) + for _, podscale := range podscales { + newPodScale, err := c.recommendContainer(podscale) if err != nil { //utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) // TODO: evaluate if we should use a 'continue' klog.Info(err) return err } - newContainerScales = append(newContainerScales, newContainerScale) + newPodScales = append(newPodScales, newPodScale) } nodeScales := types.NodeScales{ - Node: node, - ContainerScales: newContainerScales, + Node: node, + PodScales: newPodScales, } // Send to output channel. @@ -206,32 +206,31 @@ func (c *Controller) recommendNode(node string) error { } // recommendContainer recommends the new resources to assign to a pod -func (c *Controller) recommendContainer(containerScale *v1beta1.ContainerScale) (*v1beta1.ContainerScale, error) { - key := fmt.Sprintf("%s/%s", containerScale.Namespace, containerScale.Name) +func (c *Controller) recommendContainer(podScale *v1beta1.PodScale) (*v1beta1.PodScale, error) { + key := fmt.Sprintf("%s/%s", podScale.Namespace, podScale.Name) klog.Info("Recommending for ", key) // Get the pod associated with the pod scale - pod, err := c.listers.Pods(containerScale.Spec.PodRef.Namespace).Get(containerScale.Spec.PodRef.Name) + pod, err := c.listers.Pods(podScale.Spec.Namespace).Get(podScale.Spec.Pod) if err != nil { - return nil, fmt.Errorf("error: %s, cannot retrieve pod with name %s and namespace %s", err, containerScale.Spec.PodRef.Name, containerScale.Spec.PodRef.Namespace) + return nil, fmt.Errorf("error: %s, cannot retrieve pod with name %s and namespace %s", err, podScale.Spec.Pod, podScale.Spec.Namespace) } // Retrieve the sla - sla, err := c.listers.ServiceLevelAgreements(containerScale.Spec.SLARef.Namespace).Get(containerScale.Spec.SLARef.Name) + sla, err := c.listers.ServiceLevelAgreements(podScale.Spec.Namespace).Get(podScale.Spec.SLA) if err != nil { - //utilruntime.HandleError(fmt.Errorf("error: %s, failed to get sla with name %s and namespace %s from lister", err, containerScale.Spec.SLARef.Name, containerScale.Spec.SLARef.Namespace)) return nil, err } // Retrieve the logic - logicInterface, ok := c.status.logicMap.LoadOrStore(key, newControlTheoryLogic(containerScale)) + logicInterface, ok := c.status.logicMap.LoadOrStore(key, newControlTheoryLogic(podScale)) if !ok { return nil, fmt.Errorf("the key %s has no logic associated with it", key) } logic, ok := logicInterface.(Logic) if !ok { - return nil, fmt.Errorf("error: %s, failed to cast logic with name %s and namespace %s", err, containerScale.Spec.SLARef.Name, containerScale.Spec.SLARef.Namespace) + return nil, fmt.Errorf("error: %s, failed to cast logic with name %s and namespace %s", err, podScale.Spec.SLA, 
podScale.Spec.Namespace) } // Retrieve the metrics @@ -241,7 +240,7 @@ func (c *Controller) recommendContainer(containerScale *v1beta1.ContainerScale) } // Compute the new resources - newContainerScale, err := logic.computeContainerScale(pod, containerScale, sla, metrics) + newPodScale, err := logic.computePodScale(pod, podScale, sla, metrics) - return newContainerScale, nil + return newPodScale, nil } diff --git a/pkg/pod-autoscaler/pkg/recommender/logic.go b/pkg/pod-autoscaler/pkg/recommender/logic.go index 3b95787..fe7a35f 100644 --- a/pkg/pod-autoscaler/pkg/recommender/logic.go +++ b/pkg/pod-autoscaler/pkg/recommender/logic.go @@ -14,7 +14,7 @@ import ( // Logic is the logic with which the recommender suggests new resources type Logic interface { - computeContainerScale(pod *v1.Pod, containerScale *v1beta1.ContainerScale, sla *v1beta1.ServiceLevelAgreement, metric *metricsv1beta2.MetricValue) (*v1beta1.ContainerScale, error) + computePodScale(pod *v1.Pod, podScale *v1beta1.PodScale, sla *v1beta1.ServiceLevelAgreement, metric *metricsv1beta2.MetricValue) (*v1beta1.PodScale, error) } // ControlTheoryLogic is the logic that apply control theory in order to recommendContainer new resources @@ -24,10 +24,10 @@ type ControlTheoryLogic struct { } // newControlTheoryLogic returns a new control theory logic -func newControlTheoryLogic(containerScale *v1beta1.ContainerScale) *ControlTheoryLogic { +func newControlTheoryLogic(podScale *v1beta1.PodScale) *ControlTheoryLogic { return &ControlTheoryLogic{ - xcprec: float64(containerScale.Status.ActualResources.Cpu().MilliValue()), - cores: float64(containerScale.Status.ActualResources.Cpu().MilliValue()), + xcprec: float64(podScale.Status.ActualResources.Cpu().MilliValue()), + cores: float64(podScale.Status.ActualResources.Cpu().MilliValue()), } } @@ -39,9 +39,9 @@ const ( DC = 10 ) -// computeContainerScale computes a new pod scale for a given pod. +// computePodScale computes a new pod scale for a given pod. // It also requires the old pod scale, the service level agreement and the pod metrics. 
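(Aside) The per-pod logic cache in `recommendContainer` above uses `LoadOrStore` on a concurrent map; its contract appears to mirror the standard library's `sync.Map`, where the boolean reports whether an existing value was loaded, not whether the call succeeded. That is worth keeping in mind when reading the `!ok` branch. A standalone sketch with `sync.Map`:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var logicMap sync.Map

	// First sight of the key: the fresh value is stored and loaded == false.
	actual, loaded := logicMap.LoadOrStore("default/pod-a", "control-theory-logic")
	fmt.Println(actual, loaded) // control-theory-logic false

	// Subsequent calls return the existing value and loaded == true.
	actual, loaded = logicMap.LoadOrStore("default/pod-a", "another-logic")
	fmt.Println(actual, loaded) // control-theory-logic true
}
```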
-func (logic *ControlTheoryLogic) computeContainerScale(pod *v1.Pod, containerScale *v1beta1.ContainerScale, sla *v1beta1.ServiceLevelAgreement, metric *metricsv1beta2.MetricValue) (*v1beta1.ContainerScale, error) { +func (logic *ControlTheoryLogic) computePodScale(pod *v1.Pod, podScale *v1beta1.PodScale, sla *v1beta1.ServiceLevelAgreement, metric *metricsv1beta2.MetricValue) (*v1beta1.PodScale, error) { container, err := ContainerToScale(*pod, sla.Spec.Service.Container) @@ -52,7 +52,7 @@ func (logic *ControlTheoryLogic) computeContainerScale(pod *v1.Pod, containerSca // Compute the cpu and memory value for the pod desiredCPU := logic.computeCPUResource(container, sla, metric) - desiredMemory := logic.computeMemoryResource(container, containerScale, sla, metric) + desiredMemory := logic.computeMemoryResource(container, podScale, sla, metric) desiredResources := make(v1.ResourceList) desiredResources[v1.ResourceCPU] = *desiredCPU @@ -64,21 +64,21 @@ func (logic *ControlTheoryLogic) computeContainerScale(pod *v1.Pod, containerSca cappedResources[v1.ResourceCPU] = *cappedCPU cappedResources[v1.ResourceMemory] = *cappedMemory - // Copy the current ContainerScale and edit the desired value - newContainerScale := containerScale.DeepCopy() - newContainerScale.Spec.DesiredResources = desiredResources - newContainerScale.Status.CappedResources = cappedResources + // Copy the current PodScale and edit the desired value + newPodScale := podScale.DeepCopy() + newPodScale.Spec.DesiredResources = desiredResources + newPodScale.Status.CappedResources = cappedResources - return newContainerScale, nil + return newPodScale, nil } // computeMemoryResource computes memory resources for a given pod. -func (logic *ControlTheoryLogic) computeMemoryResource(container v1.Container, containerScale *v1beta1.ContainerScale, sla *v1beta1.ServiceLevelAgreement, metric *metricsv1beta2.MetricValue) *resource.Quantity { +func (logic *ControlTheoryLogic) computeMemoryResource(container v1.Container, podScale *v1beta1.PodScale, sla *v1beta1.ServiceLevelAgreement, metric *metricsv1beta2.MetricValue) *resource.Quantity { // Retrieve the value of actual and desired cpu resources // TODO: maybe can be deleted - desiredResource := containerScale.Spec.DesiredResources.Memory() - //actualResource := containerScale.Status.ActualResources.Memory() + desiredResource := podScale.Spec.DesiredResources.Memory() + //actualResource := podScale.Status.ActualResources.Memory() // Compute the new desired value newDesiredResource := resource.NewMilliQuantity(desiredResource.MilliValue(), resource.BinarySI) diff --git a/pkg/pod-autoscaler/pkg/recommender/logic_test.go b/pkg/pod-autoscaler/pkg/recommender/logic_test.go index 95e2ce1..54e4437 100644 --- a/pkg/pod-autoscaler/pkg/recommender/logic_test.go +++ b/pkg/pod-autoscaler/pkg/recommender/logic_test.go @@ -2,9 +2,10 @@ package recommender import ( "encoding/json" - "k8s.io/klog/v2" "testing" + "k8s.io/klog/v2" + "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -82,16 +83,16 @@ func TestControlTheoryLogic(t *testing.T) { }, } - containerScale := &v1beta1.ContainerScale{ + podScale := &v1beta1.PodScale{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{}, - Spec: v1beta1.ContainerScaleSpec{ + Spec: v1beta1.PodScaleSpec{ DesiredResources: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceCPU: *resource.NewScaledQuantity((tt.lowerBound+tt.upperBound)/2, resource.Milli), 
corev1.ResourceMemory: *resource.NewScaledQuantity((tt.lowerBound+tt.upperBound)/2, resource.Milli), }, }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceCPU: *resource.NewScaledQuantity((tt.lowerBound+tt.upperBound)/2, resource.Milli), corev1.ResourceMemory: *resource.NewScaledQuantity((tt.lowerBound+tt.upperBound)/2, resource.Milli), @@ -113,12 +114,12 @@ func TestControlTheoryLogic(t *testing.T) { } for i := 0; i < 200; i++ { - containerScale, err := logic.computeContainerScale(pod, containerScale, sla, &metricsMap) + podScale, err := logic.computePodScale(pod, podScale, sla, &metricsMap) require.Nil(t, err) - require.GreaterOrEqual(t, containerScale.Status.CappedResources.Cpu().MilliValue(), tt.lowerBound) - x, _ := json.Marshal(containerScale) + require.GreaterOrEqual(t, podScale.Status.CappedResources.Cpu().MilliValue(), tt.lowerBound) + x, _ := json.Marshal(podScale) klog.Info(string(x)) - require.LessOrEqual(t, containerScale.Status.CappedResources.Cpu().MilliValue(), tt.upperBound) + require.LessOrEqual(t, podScale.Status.CappedResources.Cpu().MilliValue(), tt.upperBound) } }) diff --git a/pkg/pod-replicas-updater/e2e/controller_suite_test.go b/pkg/pod-replicas-updater/e2e/controller_suite_test.go index c68b863..3596897 100644 --- a/pkg/pod-replicas-updater/e2e/controller_suite_test.go +++ b/pkg/pod-replicas-updater/e2e/controller_suite_test.go @@ -76,7 +76,7 @@ var _ = BeforeSuite(func(done Done) { Pod: coreInformerFactory.Core().V1().Pods(), Node: coreInformerFactory.Core().V1().Nodes(), Service: coreInformerFactory.Core().V1().Services(), - ContainerScale: crdInformerFactory.Systemautoscaler().V1beta1().ContainerScales(), + PodScale: crdInformerFactory.Systemautoscaler().V1beta1().PodScales(), ServiceLevelAgreement: crdInformerFactory.Systemautoscaler().V1beta1().ServiceLevelAgreements(), } @@ -250,16 +250,16 @@ func newDeployment(name string, container string, labels map[string]string, sele } } -func newContainerScale(sla *sa.ServiceLevelAgreement, pod *corev1.Pod, selectorLabels map[string]string) *sa.ContainerScale { +func newPodScale(sla *sa.ServiceLevelAgreement, pod *corev1.Pod, selectorLabels map[string]string) *sa.PodScale { podLabels := make(labels.Set) for k, v := range selectorLabels { podLabels[k] = v } podLabels["system.autoscaler/node"] = pod.Spec.NodeName - return &sa.ContainerScale{ + return &sa.PodScale{ TypeMeta: metav1.TypeMeta{ APIVersion: "sa.polimi.it/v1beta1", - Kind: "ContainerScale", + Kind: "PodScale", }, ObjectMeta: metav1.ObjectMeta{ Name: "pod-" + pod.GetName(), @@ -274,19 +274,14 @@ func newContainerScale(sla *sa.ServiceLevelAgreement, pod *corev1.Pod, selectorL }, }, }, - Spec: sa.ContainerScaleSpec{ - SLARef: sa.SLARef{ - Name: sla.GetName(), - Namespace: sla.GetNamespace(), - }, - PodRef: sa.PodRef{ - Name: pod.GetName(), - Namespace: pod.GetNamespace(), - }, + Spec: sa.PodScaleSpec{ + SLA: sla.GetName(), + Namespace: sla.GetNamespace(), + Pod: pod.GetName(), Container: pod.Spec.Containers[0].Name, DesiredResources: sla.Spec.DefaultResources, }, - Status: sa.ContainerScaleStatus{ + Status: sa.PodScaleStatus{ ActualResources: sla.Spec.DefaultResources, CappedResources: sla.Spec.DefaultResources, }, diff --git a/pkg/pod-replicas-updater/e2e/replica_updater_ginkgo_test.go b/pkg/pod-replicas-updater/e2e/replica_updater_ginkgo_test.go index 828d977..0dbe0a6 100644 --- a/pkg/pod-replicas-updater/e2e/replica_updater_ginkgo_test.go +++ 
b/pkg/pod-replicas-updater/e2e/replica_updater_ginkgo_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "context" + sa "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -56,12 +57,12 @@ var _ = Describe("Replica updater controller", func() { podList, err := getPodsForSvc(svc, namespace, *kubeClient) Expect(err).ShouldNot(HaveOccurred()) - var containerScales []*sa.ContainerScale + var podScales []*sa.PodScale for _, pod := range podList.Items { - containerScale := newContainerScale(sla, &pod, labels) - containerScale, err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Create(ctx, containerScale, metav1.CreateOptions{}) + podScale := newPodScale(sla, &pod, labels) + podScale, err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Create(ctx, podScale, metav1.CreateOptions{}) Expect(err).ShouldNot(HaveOccurred()) - containerScales = append(containerScales, containerScale) + podScales = append(podScales, podScale) } Eventually(func() bool { @@ -70,8 +71,8 @@ var _ = Describe("Replica updater controller", func() { return *(dp.Spec.Replicas) < int32(nReplicas) }, timeout, interval).Should(BeTrue()) - for _, containerScale := range containerScales { - err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Delete(ctx, containerScale.Name, metav1.DeleteOptions{}) + for _, podScale := range podScales { + err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Delete(ctx, podScale.Name, metav1.DeleteOptions{}) Expect(err).ShouldNot(HaveOccurred()) } @@ -129,13 +130,13 @@ var _ = Describe("Replica updater controller", func() { podList, err := getPodsForSvc(svc, namespace, *kubeClient) Expect(err).ShouldNot(HaveOccurred()) - var containerScales []*sa.ContainerScale + var podScales []*sa.PodScale for _, pod := range podList.Items { - containerScale := newContainerScale(sla, &pod, labels) - klog.Info(containerScale) - containerScale, err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Create(ctx, containerScale, metav1.CreateOptions{}) + podScale := newPodScale(sla, &pod, labels) + klog.Info(podScale) + podScale, err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Create(ctx, podScale, metav1.CreateOptions{}) Expect(err).ShouldNot(HaveOccurred()) - containerScales = append(containerScales, containerScale) + podScales = append(podScales, podScale) } Eventually(func() bool { @@ -143,8 +144,8 @@ var _ = Describe("Replica updater controller", func() { return *(dp.Spec.Replicas) > int32(nReplicas) }, timeout, interval).Should(BeTrue()) - for _, containerScale := range containerScales { - err = saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Delete(ctx, containerScale.Name, metav1.DeleteOptions{}) + for _, podScale := range podScales { + err = saClient.SystemautoscalerV1beta1().PodScales(namespace).Delete(ctx, podScale.Name, metav1.DeleteOptions{}) Expect(err).ShouldNot(HaveOccurred()) } diff --git a/pkg/pod-replicas-updater/main.go b/pkg/pod-replicas-updater/main.go index d892d8f..8ce07b4 100644 --- a/pkg/pod-replicas-updater/main.go +++ b/pkg/pod-replicas-updater/main.go @@ -2,6 +2,8 @@ package main import ( "flag" + "time" + clientset "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" sainformers "github.com/lterrac/system-autoscaler/pkg/generated/informers/externalversions" informers2 "github.com/lterrac/system-autoscaler/pkg/informers" @@ -11,7 +13,6 @@ import ( "k8s.io/client-go/kubernetes" 
"k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - "time" ) var ( @@ -49,7 +50,7 @@ func main() { Pod: coreInformerFactory.Core().V1().Pods(), Node: coreInformerFactory.Core().V1().Nodes(), Service: coreInformerFactory.Core().V1().Services(), - ContainerScale: saInformerFactory.Systemautoscaler().V1beta1().ContainerScales(), + PodScale: saInformerFactory.Systemautoscaler().V1beta1().PodScales(), ServiceLevelAgreement: saInformerFactory.Systemautoscaler().V1beta1().ServiceLevelAgreements(), } diff --git a/pkg/pod-replicas-updater/pkg/controller.go b/pkg/pod-replicas-updater/pkg/controller.go index 1c8e5c7..ce87b71 100644 --- a/pkg/pod-replicas-updater/pkg/controller.go +++ b/pkg/pod-replicas-updater/pkg/controller.go @@ -3,6 +3,8 @@ package replicaupdater import ( "context" "fmt" + "time" + "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" saclientset "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" samplescheme "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned/scheme" @@ -19,7 +21,6 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - "time" ) const ( @@ -38,8 +39,8 @@ type Controller struct { listers informers.Listers - containerScaleSynced cache.InformerSynced - podSynced cache.InformerSynced + podScaleSynced cache.InformerSynced + podSynced cache.InformerSynced // recorder is an event recorder for recording Event resources to the // Kubernetes API. @@ -71,14 +72,14 @@ func NewController(kubernetesClientset *kubernetes.Clientset, // Instantiate the Controller controller := &Controller{ - saClientSet: saClientSet, - kubernetesClientset: kubernetesClientset, - recorder: recorder, - listers: informers.GetListers(), - containerScaleSynced: informers.ContainerScale.Informer().HasSynced, - podSynced: informers.Pod.Informer().HasSynced, - MetricClient: NewMetricClient(), - workqueue: queue.NewQueue("SLAQueue"), + saClientSet: saClientSet, + kubernetesClientset: kubernetesClientset, + recorder: recorder, + listers: informers.GetListers(), + podScaleSynced: informers.PodScale.Informer().HasSynced, + podSynced: informers.Pod.Informer().HasSynced, + MetricClient: NewMetricClient(), + workqueue: queue.NewQueue("SLAQueue"), } return controller @@ -96,7 +97,7 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, - c.containerScaleSynced, + c.podScaleSynced, c.podSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } @@ -145,21 +146,21 @@ func (c *Controller) handleSLA(key string) error { return fmt.Errorf("failed to retrieve the sla, error: %v", err) } - containerScales, err := c.listers.ContainerScaleLister.List(labels.Everything()) + podScales, err := c.listers.PodScaleLister.List(labels.Everything()) if err != nil { return fmt.Errorf("failed to retrieve the container scales, error: %v", err) } - if len(containerScales) == 0 { + if len(podScales) == 0 { return fmt.Errorf("no container scales found") } // Filter all pod scales and pods matched by the sla - var matchedContainerScales []*v1beta1.ContainerScale + var matchedPodScales []*v1beta1.PodScale var matchedPods []*corev1.Pod - for _, containerScale := range containerScales { - if containerScale.Spec.SLARef.Namespace == sla.Namespace && containerScale.Spec.SLARef.Name == sla.Name { - matchedContainerScales = append(matchedContainerScales, 
containerScale) - pod, err := c.listers.Pods(containerScale.Spec.PodRef.Namespace).Get(containerScale.Spec.PodRef.Name) + for _, podScale := range podScales { + if podScale.Spec.Namespace == sla.Namespace && podScale.Spec.SLA == sla.Name { + matchedPodScales = append(matchedPodScales, podScale) + pod, err := c.listers.Pods(podScale.Spec.Namespace).Get(podScale.Spec.Pod) if err != nil { return fmt.Errorf("failed to retrieve the pod, error: %v", err) } else { @@ -223,7 +224,7 @@ func (c *Controller) handleSLA(key string) error { } // Compute the new amount of replicas - nReplicas := logic.computeReplica(sla, matchedPods, matchedContainerScales, podMetrics, *deployment.Spec.Replicas) + nReplicas := logic.computeReplica(sla, matchedPods, matchedPodScales, podMetrics, *deployment.Spec.Replicas) klog.Info("SLA key: ", key, " new amount of replicas: ", nReplicas) // Set the new amount of replicas diff --git a/pkg/pod-replicas-updater/pkg/logic.go b/pkg/pod-replicas-updater/pkg/logic.go index b7f04f7..7d04654 100644 --- a/pkg/pod-replicas-updater/pkg/logic.go +++ b/pkg/pod-replicas-updater/pkg/logic.go @@ -1,17 +1,18 @@ package replicaupdater import ( + "math" + "time" + "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" - "math" - "time" ) // Logic is the logic the controller uses to suggest new replica values for an application type Logic interface { //computeReplica computes the number of replicas for an application - computeReplica(sla *v1beta1.ServiceLevelAgreement, pods []*corev1.Pod, podscales []*v1beta1.ContainerScale, metrics []map[string]interface{}, curReplica int32) int32 + computeReplica(sla *v1beta1.ServiceLevelAgreement, pods []*corev1.Pod, podscales []*v1beta1.PodScale, metrics []map[string]interface{}, curReplica int32) int32 } type HPALogicState string @@ -49,7 +50,7 @@ const ( ) //computeReplica computes the number of replicas for a service, given the serviceLevelAgreement -func (logic *HPALogic) computeReplica(sla *v1beta1.ServiceLevelAgreement, pods []*corev1.Pod, podscales []*v1beta1.ContainerScale, metrics []map[string]interface{}, curReplica int32) int32 { +func (logic *HPALogic) computeReplica(sla *v1beta1.ServiceLevelAgreement, pods []*corev1.Pod, podscales []*v1beta1.PodScale, metrics []map[string]interface{}, curReplica int32) int32 { minReplicas := sla.Spec.MinReplicas maxReplicas := sla.Spec.MaxReplicas diff --git a/pkg/containerscale-controller/.dockerignore b/pkg/podscale-controller/.dockerignore similarity index 100% rename from pkg/containerscale-controller/.dockerignore rename to pkg/podscale-controller/.dockerignore diff --git a/pkg/podscale-controller/Dockerfile b/pkg/podscale-controller/Dockerfile new file mode 100644 index 0000000..0b91135 --- /dev/null +++ b/pkg/podscale-controller/Dockerfile @@ -0,0 +1,7 @@ +FROM gcr.io/distroless/static:nonroot + +LABEL name="PodScale Controller" + +COPY podscale-controller /usr/local/bin/ + +CMD ["podscale-controller"] diff --git a/pkg/containerscale-controller/Makefile b/pkg/podscale-controller/Makefile similarity index 96% rename from pkg/containerscale-controller/Makefile rename to pkg/podscale-controller/Makefile index dae331f..32e0a4a 100644 --- a/pkg/containerscale-controller/Makefile +++ b/pkg/podscale-controller/Makefile @@ -1,5 +1,5 @@ BUILD_SETTINGS = CGO_ENABLED=0 GOOS=linux GOARCH=amd64 -IMAGE = containerscale-controller +IMAGE = podscale-controller IMAGE_VERSION = $(shell git tag --points-at HEAD | sed '/$(IMAGE)\/.*/!s/.*//' | sed 's/\//:/') 
REPO = systemautoscaler diff --git a/pkg/podscale-controller/README.md b/pkg/podscale-controller/README.md new file mode 100644 index 0000000..31e3c10 --- /dev/null +++ b/pkg/podscale-controller/README.md @@ -0,0 +1,9 @@ +# PodScale Controller + +PodScale Controller manages the lifecycle of `PodScale` resources, dealing with their creation and deletion. + +## PodScale lifecycle + +Once a new `ServiceLevelAgreement` is deployed into a namespace, the controller will try to find a set of `Services` compatible with the `serviceSelector` and will create a new `PodScale` for each `Pod`. The match is currently done by setting the `MatchLabels` field inside the Selector, but the `Selector` strategy needs further analysis, since `MatchExpressions` is not used. +After the `PodScale` creation, the controller will try to keep the set of `PodScale` up to date with `Pod` resources, handling changes in the number of replicas and `Pod` deletions. What is not covered at the moment is specified in this [issue](https://github.com/lterrac/system-autoscaler/issues/2). +When the `ServiceLevelAgreement` is deleted from the namespace, all the `PodScale` resources generated from it will also be deleted, leaving the namespace as it was before introducing the Agreement. diff --git a/pkg/containerscale-controller/build.sh b/pkg/podscale-controller/build.sh similarity index 100% rename from pkg/containerscale-controller/build.sh rename to pkg/podscale-controller/build.sh diff --git a/pkg/podscale-controller/containerscale-controller b/pkg/podscale-controller/containerscale-controller new file mode 100755 index 0000000..4be8b84 Binary files /dev/null and b/pkg/podscale-controller/containerscale-controller differ diff --git a/pkg/containerscale-controller/main.go b/pkg/podscale-controller/main.go similarity index 91% rename from pkg/containerscale-controller/main.go rename to pkg/podscale-controller/main.go index 8243108..d70de19 100644 --- a/pkg/containerscale-controller/main.go +++ b/pkg/podscale-controller/main.go @@ -2,9 +2,10 @@ package main import ( "flag" - informers2 "github.com/lterrac/system-autoscaler/pkg/informers" "time" + informers2 "github.com/lterrac/system-autoscaler/pkg/informers" + sainformers "github.com/lterrac/system-autoscaler/pkg/generated/informers/externalversions" coreinformers "k8s.io/client-go/informers" @@ -15,9 +16,8 @@ import ( // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
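(Aside) The Service-to-Pod match described in the README above boils down to label-selector evaluation: the Service's selector map becomes a label selector, and each Pod's label set is tested against it. A minimal sketch with illustrative label values:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A Service selector map turned into a Selector, as the controller does.
	serviceSelector := labels.Set{"app": "foo"}.AsSelector()

	// A Pod carrying extra labels still matches: selectors test a subset.
	podLabels := labels.Set{"app": "foo", "system.autoscaler/node": "node-1"}
	fmt.Println(serviceSelector.Matches(podLabels)) // true

	other := labels.Set{"app": "bar"}
	fmt.Println(serviceSelector.Matches(other)) // false
}
```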
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - containerScaleController "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/controller" clientset "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" + podScaleController "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/controller" "github.com/lterrac/system-autoscaler/pkg/signals" ) @@ -64,11 +64,11 @@ func main() { Pod: coreInformerFactory.Core().V1().Pods(), Node: coreInformerFactory.Core().V1().Nodes(), Service: coreInformerFactory.Core().V1().Services(), - ContainerScale: saInformerFactory.Systemautoscaler().V1beta1().ContainerScales(), + PodScale: saInformerFactory.Systemautoscaler().V1beta1().PodScales(), ServiceLevelAgreement: saInformerFactory.Systemautoscaler().V1beta1().ServiceLevelAgreements(), } - controller := containerScaleController.NewController( + controller := podScaleController.NewController( kubeClient, systemAutoscalerClient, informers, diff --git a/pkg/containerscale-controller/pkg/controller/controller.go b/pkg/podscale-controller/pkg/controller/controller.go similarity index 77% rename from pkg/containerscale-controller/pkg/controller/controller.go rename to pkg/podscale-controller/pkg/controller/controller.go index b6e0284..a69c339 100644 --- a/pkg/containerscale-controller/pkg/controller/controller.go +++ b/pkg/podscale-controller/pkg/controller/controller.go @@ -26,7 +26,7 @@ const ( // AgentName is the controller name used // both in logs and labels to identify it - AgentName = "containerscale-controller" + AgentName = "podscale-controller" // SubjectToLabel is used to identify the ServiceLevelAgreement // matched by the Service @@ -40,35 +40,35 @@ const ( // process a pod that does not have the container specified in the Service Level Agreement ContainerNotFound = "Container not found" - // SuccessSynced is used as part of the Event 'reason' when a containerScale is synced + // SuccessSynced is used as part of the Event 'reason' when a podScale is synced SuccessSynced = "Synced" - // MessageResourceSynced is the message used for an Event fired when a containerScale + // MessageResourceSynced is the message used for an Event fired when a podScale // is synced successfully - MessageResourceSynced = "containerScale synced successfully" + MessageResourceSynced = "podScale synced successfully" ) -// Controller is the controller implementation for containerScale resources +// Controller is the controller implementation for podScale resources type Controller struct { - kubeClientset kubernetes.Interface - containerScalesClientset clientset.Interface + kubeClientset kubernetes.Interface + podScalesClientset clientset.Interface listers informers.Listers - slasSynced cache.InformerSynced - containerScalesSynced cache.InformerSynced - servicesSynced cache.InformerSynced - podSynced cache.InformerSynced + slasSynced cache.InformerSynced + podScalesSynced cache.InformerSynced + servicesSynced cache.InformerSynced + podSynced cache.InformerSynced slasworkqueue queue.Queue recorder record.EventRecorder } -// NewController returns a new ContainerScale controller +// NewController returns a new PodScale controller func NewController( kubeClient kubernetes.Interface, - containerScalesClient clientset.Interface, + podScalesClient clientset.Interface, informers informers.Informers) *Controller { // Create event broadcaster @@ -85,15 +85,15 @@ func NewController( recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: AgentName}) 
controller := &Controller{ - kubeClientset: kubeClient, - containerScalesClientset: containerScalesClient, + kubeClientset: kubeClient, + podScalesClientset: podScalesClient, listers: informers.GetListers(), - slasSynced: informers.ServiceLevelAgreement.Informer().HasSynced, - containerScalesSynced: informers.ContainerScale.Informer().HasSynced, - servicesSynced: informers.Service.Informer().HasSynced, - podSynced: informers.Pod.Informer().HasSynced, + slasSynced: informers.ServiceLevelAgreement.Informer().HasSynced, + podScalesSynced: informers.PodScale.Informer().HasSynced, + servicesSynced: informers.Service.Informer().HasSynced, + podSynced: informers.Pod.Informer().HasSynced, slasworkqueue: queue.NewQueue("ServiceLevelAgreements"), recorder: recorder, @@ -119,20 +119,20 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer c.slasworkqueue.ShutDown() // Start the informer factories to begin populating the informer caches - klog.Info("Starting containerScale controller") + klog.Info("Starting podScale controller") // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, c.slasSynced, c.servicesSynced, - c.containerScalesSynced, + c.podScalesSynced, c.podSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.Info("Starting workers") - // Launch two workers to process containerScale resources + // Launch two workers to process podScale resources for i := 0; i < threadiness; i++ { go wait.Until(c.runWorker, time.Second, stopCh) } diff --git a/pkg/containerscale-controller/pkg/controller/handler.go b/pkg/podscale-controller/pkg/controller/handler.go similarity index 100% rename from pkg/containerscale-controller/pkg/controller/handler.go rename to pkg/podscale-controller/pkg/controller/handler.go diff --git a/pkg/containerscale-controller/pkg/controller/sync.go b/pkg/podscale-controller/pkg/controller/sync.go similarity index 63% rename from pkg/containerscale-controller/pkg/controller/sync.go rename to pkg/podscale-controller/pkg/controller/sync.go index c6364d2..582ec1d 100644 --- a/pkg/containerscale-controller/pkg/controller/sync.go +++ b/pkg/podscale-controller/pkg/controller/sync.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/utils" + "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/utils" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,7 +32,7 @@ func (c *Controller) syncServiceLevelAgreement(key string) error { // The SLA resource may no longer exist, in which case we stop // processing. 
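(Aside) The `recorder` wired into the controller above turns decisions such as `ContainerNotFound` into Kubernetes Events. The emission pattern in isolation, using client-go's in-memory fake recorder so the sketch runs without a cluster:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// Buffered in-memory recorder; production code points an EventBroadcaster
	// sink at the API server instead.
	recorder := record.NewFakeRecorder(10)

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
	recorder.Eventf(pod, corev1.EventTypeWarning, "ContainerNotFound",
		"Pod %s/%s does not have container %s", pod.Namespace, pod.Name, "main")

	fmt.Println(<-recorder.Events) // Warning ContainerNotFound Pod default/foo ...
}
```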
if errors.IsNotFound(err) { - // ContainerScale cleanup is achieved via OwnerReferences + // PodScale cleanup is achieved via OwnerReferences utilruntime.HandleError(fmt.Errorf("ServiceLevelAgreement '%s' in work queue no longer exists", key)) return nil } @@ -70,10 +70,10 @@ func (c *Controller) syncServiceLevelAgreement(key string) error { // return nil //} - // adjust Service's ContainerScale according to its Pods + // adjust Service's PodScale according to its Pods err = c.syncService(namespace, service, sla) if err != nil { - utilruntime.HandleError(fmt.Errorf("error while syncing ContainerScales for Service '%s'", service.GetName())) + utilruntime.HandleError(fmt.Errorf("error while syncing PodScales for Service '%s'", service.GetName())) utilruntime.HandleError(err) return nil } @@ -89,11 +89,11 @@ func (c *Controller) syncServiceLevelAgreement(key string) error { } } - // Once the service, pod and containerscales adhere to the desired state derived from SLA - // delete old ContainerScale without a Service matched due to a change in ServiceSelector + // Once the service, pods and podscales adhere to the desired state derived from the SLA + // delete old PodScale resources left without a matching Service due to a change in the ServiceSelector err = c.handleServiceSelectorChange(actual, desired, namespace) if err != nil { - utilruntime.HandleError(fmt.Errorf("error while cleaning ContainerScales due to a ServiceSelector change")) + utilruntime.HandleError(fmt.Errorf("error while cleaning PodScales due to a ServiceSelector change")) return nil } @@ -107,19 +107,19 @@ func (c *Controller) handleServiceSelectorChange(actual []*corev1.Service, desir if utils.ContainsService(desired, service) { continue } - // get all containerscales currently associated to a Service - containerscaleSelector := labels.Set(service.Spec.Selector).AsSelector() - containerscales, err := c.listers.ContainerScaleLister.List(containerscaleSelector) + // get all podscales currently associated with a Service + podscaleSelector := labels.Set(service.Spec.Selector).AsSelector() + podscales, err := c.listers.PodScaleLister.List(podscaleSelector) if err != nil { - utilruntime.HandleError(fmt.Errorf("error while getting ContainerScales for Service '%s'", service.GetName())) + utilruntime.HandleError(fmt.Errorf("error while getting PodScales for Service '%s'", service.GetName())) return nil } - for _, p := range containerscales { - err := c.containerScalesClientset.SystemautoscalerV1beta1().ContainerScales(namespace).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}) + for _, p := range podscales { + err := c.podScalesClientset.SystemautoscalerV1beta1().PodScales(namespace).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}) if err != nil { - utilruntime.HandleError(fmt.Errorf("error while deleting ContainerScale for Service '%s'", service.GetName())) + utilruntime.HandleError(fmt.Errorf("error while deleting PodScale for Service '%s'", service.GetName())) return nil } } @@ -128,9 +128,9 @@ func (c *Controller) handleServiceSelectorChange(actual []*corev1.Service, desir } // syncService keeps a Service up to date with the corresponding ServiceLevelAgreement -// by creating and deleting the corresponding `ContainerScale` resources. It uses the `Selector` -// to retrive the corresponding `Pod` and `ContainerScale`. The `Pod` resources are used as -// a desired state so `ContainerScale` are changed accordingly. +// by creating and deleting the corresponding `PodScale` resources. 
It uses the `Selector` +// to retrieve the corresponding `Pod` and `PodScale`. The `Pod` resources are used as +// the desired state, so `PodScale` resources are changed accordingly. func (c *Controller) syncService(namespace string, service *corev1.Service, sla *v1beta1.ServiceLevelAgreement) error { label := labels.Set(service.Spec.Selector) pods, err := c.listers.PodLister.List(label.AsSelector()) @@ -140,14 +140,14 @@ func (c *Controller) syncService(namespace string, service *corev1.Service, sla return nil } - containerscales, err := c.listers.ContainerScaleLister.List(label.AsSelector()) + podscales, err := c.listers.PodScaleLister.List(label.AsSelector()) if err != nil { - utilruntime.HandleError(fmt.Errorf("error while getting ContainerScales for Service '%s'", service.GetName())) + utilruntime.HandleError(fmt.Errorf("error while getting PodScales for Service '%s'", service.GetName())) return nil } - stateDiff := utils.DiffPods(pods, containerscales) + stateDiff := utils.DiffPods(pods, podscales) for _, pod := range stateDiff.AddList { //TODO: change when a policy to handle other QOS class will be discussed @@ -156,28 +156,28 @@ func (c *Controller) syncService(namespace string, service *corev1.Service, sla continue } - // do not create the containerscale if the specified container does not exists within the Pod + // do not create the podscale if the specified container does not exist within the Pod if !utils.HasContainer(pod.Spec.Containers, sla.Spec.Service.Container) { c.recorder.Eventf(pod, corev1.EventTypeWarning, ContainerNotFound, "Pod %s/%s does not have container %s", pod.Namespace, pod.Name, sla.Spec.Service.Container) continue } - containerscale := NewContainerScale(pod, sla, service, label) + podscale := NewPodScale(pod, sla, service, label) - _, err := c.containerScalesClientset.SystemautoscalerV1beta1().ContainerScales(namespace).Create(context.TODO(), containerscale, metav1.CreateOptions{}) + _, err := c.podScalesClientset.SystemautoscalerV1beta1().PodScales(namespace).Create(context.TODO(), podscale, metav1.CreateOptions{}) if err != nil && !errors.IsAlreadyExists(err) { - utilruntime.HandleError(fmt.Errorf("error while creating ContainerScale for Pod '%s'", containerscale.GetName())) + utilruntime.HandleError(fmt.Errorf("error while creating PodScale for Pod '%s'", podscale.GetName())) utilruntime.HandleError(err) return nil } } - for _, containerscale := range stateDiff.DeleteList { + for _, podscale := range stateDiff.DeleteList { - err := c.containerScalesClientset.SystemautoscalerV1beta1().ContainerScales(namespace).Delete(context.TODO(), containerscale.Name, metav1.DeleteOptions{}) + err := c.podScalesClientset.SystemautoscalerV1beta1().PodScales(namespace).Delete(context.TODO(), podscale.Name, metav1.DeleteOptions{}) if err != nil { - utilruntime.HandleError(fmt.Errorf("error while deleting ContainerScale for Pod '%s'", containerscale.Name)) + utilruntime.HandleError(fmt.Errorf("error while deleting PodScale for Pod '%s'", podscale.Name)) utilruntime.HandleError(err) return nil } @@ -186,9 +186,9 @@ func (c *Controller) syncService(namespace string, service *corev1.Service, sla return nil } -// NewContainerScale creates a new ContainerScale resource using the corresponding Pod and ServiceLevelAgreement infos. +// NewPodScale creates a new PodScale resource using the corresponding Pod and ServiceLevelAgreement info. // The SLA is the resource Owner in order to enable garbage collection on its deletion. 
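(Aside) The garbage collection that `NewPodScale` relies on above works through a standard OwnerReference: when the owner disappears, Kubernetes deletes every object that lists it as owner. A minimal sketch of building such a reference; the field values here are illustrative, since the actual OwnerReferences block is elided by the hunk context:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// slaOwnerReference marks the SLA as the owner of a PodScale; once the owner
// is deleted, the garbage collector removes every dependent automatically.
func slaOwnerReference(name string, uid types.UID) metav1.OwnerReference {
	return metav1.OwnerReference{
		APIVersion: "systemautoscaler.polimi.it/v1beta1",
		Kind:       "ServiceLevelAgreement",
		Name:       name,
		UID:        uid, // the UID binds the reference to one live object
	}
}

func main() {
	fmt.Printf("%+v\n", slaOwnerReference("example-sla", types.UID("1234-abcd")))
}
```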
-func NewContainerScale(pod *corev1.Pod, sla *v1beta1.ServiceLevelAgreement, service *corev1.Service, selectorLabels labels.Set) *v1beta1.ContainerScale { +func NewPodScale(pod *corev1.Pod, sla *v1beta1.ServiceLevelAgreement, service *corev1.Service, selectorLabels labels.Set) *v1beta1.PodScale { podLabels := make(labels.Set) for k, v := range selectorLabels { @@ -197,10 +197,10 @@ func NewContainerScale(pod *corev1.Pod, sla *v1beta1.ServiceLevelAgreement, serv podLabels["system.autoscaler/node"] = pod.Spec.NodeName - return &v1beta1.ContainerScale{ + return &v1beta1.PodScale{ TypeMeta: metav1.TypeMeta{ APIVersion: "systemautoscaler.polimi.it/v1beta1", - Kind: "ContainerScale", + Kind: "PodScale", }, ObjectMeta: metav1.ObjectMeta{ Name: "pod-" + pod.GetName(), @@ -215,23 +215,15 @@ func NewContainerScale(pod *corev1.Pod, sla *v1beta1.ServiceLevelAgreement, serv }, }, }, - Spec: v1beta1.ContainerScaleSpec{ - SLARef: v1beta1.SLARef{ - Name: sla.GetName(), - Namespace: sla.GetNamespace(), - }, - PodRef: v1beta1.PodRef{ - Name: pod.GetName(), - Namespace: pod.GetNamespace(), - }, - ServiceRef: v1beta1.ServiceRef{ - Name: service.GetName(), - Namespace: service.GetNamespace(), - }, + Spec: v1beta1.PodScaleSpec{ + Namespace: sla.GetNamespace(), + SLA: sla.GetName(), + Pod: pod.GetName(), + Service: service.GetName(), Container: sla.Spec.Service.Container, DesiredResources: sla.Spec.DefaultResources, }, - Status: v1beta1.ContainerScaleStatus{ + Status: v1beta1.PodScaleStatus{ ActualResources: sla.Spec.DefaultResources, CappedResources: sla.Spec.DefaultResources, }, diff --git a/pkg/containerscale-controller/pkg/e2e/controller_ginkgo_test.go b/pkg/podscale-controller/pkg/e2e/controller_ginkgo_test.go similarity index 85% rename from pkg/containerscale-controller/pkg/e2e/controller_ginkgo_test.go rename to pkg/podscale-controller/pkg/e2e/controller_ginkgo_test.go index e860f23..a8c57b7 100644 --- a/pkg/containerscale-controller/pkg/e2e/controller_ginkgo_test.go +++ b/pkg/podscale-controller/pkg/e2e/controller_ginkgo_test.go @@ -11,8 +11,8 @@ import ( "k8s.io/client-go/tools/record" systemautoscaler "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/controller" - . "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/controller" + "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/controller" + . "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/controller" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -24,11 +24,11 @@ const namespace = "e2e" const timeout = 40 * time.Second const interval = 1 * time.Second -var _ = Describe("ContainerScale controller", func() { +var _ = Describe("PodScale controller", func() { Context("With an application deployed inside the cluster", func() { ctx := context.Background() - It("Creates the containerscale if it matches the SLA service selector", func() { + It("Creates the podscale if it matches the SLA service selector", func() { slaName := "foo-sla" appName := "foo-app" @@ -54,12 +54,11 @@ var _ = Describe("ContainerScale controller", func() { }, timeout, interval).Should(BeTrue()) Eventually(func() bool { - actual, err := saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Get(ctx, "pod-"+pod.GetName(), metav1.GetOptions{}) + actual, err := saClient.SystemautoscalerV1beta1().PodScales(namespace).Get(ctx, "pod-"+pod.GetName(), metav1.GetOptions{}) return err == nil && - actual.Spec.PodRef.Name == pod.GetName() && - actual.Spec.PodRef.Namespace == namespace && - actual.Spec.SLARef.Name == sla.GetName() && - actual.Spec.SLARef.Namespace == namespace + actual.Spec.Pod == pod.GetName() && + actual.Spec.Namespace == namespace && + actual.Spec.SLA == sla.GetName() }, timeout, interval).Should(BeTrue()) // resource cleanup @@ -95,12 +94,11 @@ var _ = Describe("ContainerScale controller", func() { Expect(err).ShouldNot(HaveOccurred()) Eventually(func() bool { - actual, err := saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Get(ctx, "pod-"+matchedPod.GetName(), metav1.GetOptions{}) + actual, err := saClient.SystemautoscalerV1beta1().PodScales(namespace).Get(ctx, "pod-"+matchedPod.GetName(), metav1.GetOptions{}) return err == nil && - actual.Spec.PodRef.Name == matchedPod.GetName() && - actual.Spec.PodRef.Namespace == namespace && - actual.Spec.SLARef.Name == sla.GetName() && - actual.Spec.SLARef.Namespace == namespace + actual.Spec.Pod == matchedPod.GetName() && + actual.Spec.Namespace == namespace && + actual.Spec.SLA == sla.GetName() }, timeout, interval).Should(BeTrue()) sla.Spec.Service = &systemautoscaler.Service{ @@ -114,7 +112,7 @@ var _ = Describe("ContainerScale controller", func() { Expect(err).ShouldNot(HaveOccurred()) Eventually(func() bool { - _, err := saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Get(ctx, "pod-"+matchedPod.GetName(), metav1.GetOptions{}) + _, err := saClient.SystemautoscalerV1beta1().PodScales(namespace).Get(ctx, "pod-"+matchedPod.GetName(), metav1.GetOptions{}) return apierrors.IsNotFound(err) }, timeout, interval).Should(BeTrue()) @@ -131,7 +129,7 @@ var _ = Describe("ContainerScale controller", func() { Context("With a Service Level Agreement matching and application", func() { ctx := context.Background() - It("Changes the containerscales based on existing pods increasing them when a pod is added", func() { + It("Changes the podscales based on existing pods increasing them when a pod is added", func() { oldServiceSelector := map[string]string{ "app": "foo", } @@ -150,14 +148,13 @@ var _ = Describe("ContainerScale controller", func() { Expect(err).ShouldNot(HaveOccurred()) Eventually(func() bool { - actual, err := saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Get(ctx, "pod-"+matchedPod.GetName(), metav1.GetOptions{}) + actual, err := saClient.SystemautoscalerV1beta1().PodScales(namespace).Get(ctx, "pod-"+matchedPod.GetName(), metav1.GetOptions{}) return err == nil && actual.Spec.Container == 
matchedPod.Spec.Containers[0].Name && - actual.Spec.PodRef.Name == matchedPod.GetName() && - actual.Spec.PodRef.Namespace == namespace && - actual.Spec.SLARef.Name == sla.GetName() && - actual.Spec.SLARef.Namespace == namespace + actual.Spec.Pod == matchedPod.GetName() && + actual.Spec.Namespace == namespace && + actual.Spec.SLA == sla.GetName() }, timeout, interval).Should(BeTrue()) newPod := &corev1.Pod{ @@ -196,42 +193,41 @@ var _ = Describe("ContainerScale controller", func() { Expect(err).ShouldNot(HaveOccurred()) Eventually(func() bool { - actual, err := saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Get(ctx, "pod-"+newPod.GetName(), metav1.GetOptions{}) + actual, err := saClient.SystemautoscalerV1beta1().PodScales(namespace).Get(ctx, "pod-"+newPod.GetName(), metav1.GetOptions{}) return err == nil && actual.Spec.Container == newPod.Spec.Containers[0].Name && - actual.Spec.PodRef.Name == newPod.GetName() && - actual.Spec.PodRef.Namespace == namespace && - actual.Spec.SLARef.Name == sla.GetName() && - actual.Spec.SLARef.Namespace == namespace + actual.Spec.Pod == newPod.GetName() && + actual.Spec.Namespace == namespace && + actual.Spec.SLA == sla.GetName() }, timeout, interval).Should(BeTrue()) Eventually(func() bool { - containerscales, containerscaleErr := saClient.SystemautoscalerV1beta1().ContainerScales(namespace).List(ctx, metav1.ListOptions{ + podscales, podscaleErr := saClient.SystemautoscalerV1beta1().PodScales(namespace).List(ctx, metav1.ListOptions{ LabelSelector: labels.Set(matchedSvc.Spec.Selector).AsSelector().String(), }) pods, podErr := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) - return containerscaleErr == nil && + return podscaleErr == nil && podErr == nil && - len(containerscales.Items) == len(pods.Items) + len(podscales.Items) == len(pods.Items) }, timeout, interval).Should(BeTrue()) err = kubeClient.CoreV1().Pods(namespace).Delete(ctx, newPod.GetName(), metav1.DeleteOptions{}) Expect(err).ShouldNot(HaveOccurred()) Eventually(func() bool { - _, err := saClient.SystemautoscalerV1beta1().ContainerScales(namespace).Get(ctx, "pod-"+newPod.GetName(), metav1.GetOptions{}) + _, err := saClient.SystemautoscalerV1beta1().PodScales(namespace).Get(ctx, "pod-"+newPod.GetName(), metav1.GetOptions{}) return apierrors.IsNotFound(err) }, timeout, interval).Should(BeTrue()) Eventually(func() bool { - containerscales, containerscaleErr := saClient.SystemautoscalerV1beta1().ContainerScales(namespace).List(ctx, metav1.ListOptions{ + podscales, podscaleErr := saClient.SystemautoscalerV1beta1().PodScales(namespace).List(ctx, metav1.ListOptions{ LabelSelector: labels.Set(matchedSvc.Spec.Selector).AsSelector().String(), }) pods, podErr := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) - return containerscaleErr == nil && + return podscaleErr == nil && podErr == nil && - len(containerscales.Items) == len(pods.Items) + len(podscales.Items) == len(pods.Items) }, timeout, interval).Should(BeTrue()) // resource cleanup diff --git a/pkg/containerscale-controller/pkg/e2e/controller_suite_test.go b/pkg/podscale-controller/pkg/e2e/controller_suite_test.go similarity index 91% rename from pkg/containerscale-controller/pkg/e2e/controller_suite_test.go rename to pkg/podscale-controller/pkg/e2e/controller_suite_test.go index 22336a3..7696331 100644 --- a/pkg/containerscale-controller/pkg/e2e/controller_suite_test.go +++ b/pkg/podscale-controller/pkg/e2e/controller_suite_test.go @@ -15,8 +15,8 @@ import ( . 
"github.com/onsi/gomega" systemautoscalerv1beta1 "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" - containerscale "github.com/lterrac/system-autoscaler/pkg/containerscale-controller/pkg/controller" systemautoscaler "github.com/lterrac/system-autoscaler/pkg/generated/clientset/versioned" + podscale "github.com/lterrac/system-autoscaler/pkg/podscale-controller/pkg/controller" . "github.com/onsi/ginkgo" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -66,13 +66,13 @@ var _ = BeforeSuite(func(done Done) { Pod: coreInformerFactory.Core().V1().Pods(), Node: coreInformerFactory.Core().V1().Nodes(), Service: coreInformerFactory.Core().V1().Services(), - ContainerScale: crdInformerFactory.Systemautoscaler().V1beta1().ContainerScales(), + PodScale: crdInformerFactory.Systemautoscaler().V1beta1().PodScales(), ServiceLevelAgreement: crdInformerFactory.Systemautoscaler().V1beta1().ServiceLevelAgreements(), } By("bootstrapping controller") - controller := containerscale.NewController( + controller := podscale.NewController( kubeClient, saClient, informers, diff --git a/pkg/podscale-controller/pkg/types/types.go b/pkg/podscale-controller/pkg/types/types.go new file mode 100644 index 0000000..a6fab46 --- /dev/null +++ b/pkg/podscale-controller/pkg/types/types.go @@ -0,0 +1,34 @@ +package types + +import ( + "fmt" + + "github.com/lterrac/system-autoscaler/pkg/apis/systemautoscaler/v1beta1" +) + +// NodeScales is used to group podscales by node. +type NodeScales struct { + Node string + PodScales []*v1beta1.PodScale +} + +func (n *NodeScales) Contains(name, namespace string) bool { + for _, podscale := range n.PodScales { + if podscale.Spec.Namespace == namespace && + podscale.Spec.Pod == name { + return true + } + } + return false +} + +func (n *NodeScales) Remove(name, namespace string) (*v1beta1.PodScale, error) { + for i, podscale := range n.PodScales { + if podscale.Spec.Namespace == namespace && + podscale.Spec.Pod == name { + n.PodScales = append(n.PodScales[:i], n.PodScales[i+1:]...) + return podscale, nil + } + } + return nil, fmt.Errorf("error: missing %#v-%#v in node %#v", namespace, name, n.Node) +} diff --git a/pkg/containerscale-controller/pkg/utils/utils.go b/pkg/podscale-controller/pkg/utils/utils.go similarity index 70% rename from pkg/containerscale-controller/pkg/utils/utils.go rename to pkg/podscale-controller/pkg/utils/utils.go index a7679ea..c999329 100644 --- a/pkg/containerscale-controller/pkg/utils/utils.go +++ b/pkg/podscale-controller/pkg/utils/utils.go @@ -9,16 +9,16 @@ import ( // the declared state. type StateDiff struct { AddList []*corev1.Pod - DeleteList []*v1beta1.ContainerScale + DeleteList []*v1beta1.PodScale } // DiffPods returns `Pods` that does not already have an associated -// `ContainerScale` resource and the old `ContainerScale` resources to delete. -func DiffPods(pods []*corev1.Pod, scales []*v1beta1.ContainerScale) (result StateDiff) { +// `PodScale` resource and the old `PodScale` resources to delete. 
diff --git a/pkg/containerscale-controller/pkg/utils/utils.go b/pkg/podscale-controller/pkg/utils/utils.go
similarity index 70%
rename from pkg/containerscale-controller/pkg/utils/utils.go
rename to pkg/podscale-controller/pkg/utils/utils.go
index a7679ea..c999329 100644
--- a/pkg/containerscale-controller/pkg/utils/utils.go
+++ b/pkg/podscale-controller/pkg/utils/utils.go
@@ -9,16 +9,16 @@ import (
 // the declared state.
 type StateDiff struct {
 	AddList    []*corev1.Pod
-	DeleteList []*v1beta1.ContainerScale
+	DeleteList []*v1beta1.PodScale
 }
 
-// DiffPods returns `Pods` that does not already have an associated
-// `ContainerScale` resource and the old `ContainerScale` resources to delete.
-func DiffPods(pods []*corev1.Pod, scales []*v1beta1.ContainerScale) (result StateDiff) {
+// DiffPods returns `Pods` that do not already have an associated
+// `PodScale` resource and the old `PodScale` resources to delete.
+func DiffPods(pods []*corev1.Pod, scales []*v1beta1.PodScale) (result StateDiff) {
 	blueprint := make(map[string]bool)
 
-	for _, containerscale := range scales {
-		blueprint[containerscale.Spec.PodRef.Name] = true
+	for _, podscale := range scales {
+		blueprint[podscale.Spec.Pod] = true
 	}
 
 	for _, pod := range pods {
@@ -33,9 +33,9 @@ func DiffPods(pods []*corev1.Pod, scales []*v1beta1.ContainerScale) (result Stat
 		blueprint[pod.Name] = true
 	}
 
-	for _, containerscale := range scales {
-		if !blueprint[containerscale.Spec.PodRef.Name] {
-			result.DeleteList = append(result.DeleteList, containerscale)
+	for _, podscale := range scales {
+		if !blueprint[podscale.Spec.Pod] {
+			result.DeleteList = append(result.DeleteList, podscale)
 		}
 	}
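
// NOTE (editor): DiffPods at a glance, now keyed on the flattened Spec.Pod
// field; the fixtures and the expected outcome mirror the test table that
// follows, the names are illustrative.
pods := []*corev1.Pod{
	{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}},
}
scales := []*v1beta1.PodScale{
	{Spec: v1beta1.PodScaleSpec{Pod: "bar", Namespace: "default"}},
}

diff := DiffPods(pods, scales)
// diff.AddList holds the "foo" pod (no podscale yet) and diff.DeleteList holds
// the "bar" podscale (its pod is gone), matching the add/delete cases below.
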
diff --git a/pkg/containerscale-controller/pkg/utils/utils_test.go b/pkg/podscale-controller/pkg/utils/utils_test.go
similarity index 52%
rename from pkg/containerscale-controller/pkg/utils/utils_test.go
rename to pkg/podscale-controller/pkg/utils/utils_test.go
index 0639d65..40ec63a 100644
--- a/pkg/containerscale-controller/pkg/utils/utils_test.go
+++ b/pkg/podscale-controller/pkg/utils/utils_test.go
@@ -33,99 +33,91 @@ func TestDiffPods(t *testing.T) {
 		},
 	}
 
-	containerscales := []*v1beta1.ContainerScale{
+	podscales := []*v1beta1.PodScale{
 		{
-			Spec: v1beta1.ContainerScaleSpec{
-				PodRef: v1beta1.PodRef{
-					Name:      "foo",
-					Namespace: "default",
-				},
+			Spec: v1beta1.PodScaleSpec{
+				Pod:       "foo",
+				Namespace: "default",
 			},
 		},
 		{
-			Spec: v1beta1.ContainerScaleSpec{
-				PodRef: v1beta1.PodRef{
-					Name:      "bar",
-					Namespace: "default",
-				},
+			Spec: v1beta1.PodScaleSpec{
+				Pod:       "bar",
+				Namespace: "default",
 			},
 		},
 		{
-			Spec: v1beta1.ContainerScaleSpec{
-				PodRef: v1beta1.PodRef{
-					Name:      "foobar",
-					Namespace: "default",
-				},
+			Spec: v1beta1.PodScaleSpec{
+				Pod:       "foobar",
+				Namespace: "default",
 			},
 		},
 		{
-			Spec: v1beta1.ContainerScaleSpec{
-				PodRef: v1beta1.PodRef{
-					Name:      "foobarfoo",
-					Namespace: "default",
				},
+			Spec: v1beta1.PodScaleSpec{
+				Pod:       "foobarfoo",
+				Namespace: "default",
 			},
 		},
 	}
 
 	testcases := []struct {
-		description     string
-		pods            []*corev1.Pod
-		containerscales []*v1beta1.ContainerScale
-		expected        StateDiff
+		description string
+		pods        []*corev1.Pod
+		podscales   []*v1beta1.PodScale
+		expected    StateDiff
 	}{
 		{
-			description:     "add all pods if there are no containerscales",
-			pods:            pods,
-			containerscales: []*v1beta1.ContainerScale{},
+			description: "add all pods if there are no podscales",
+			pods:        pods,
+			podscales:   []*v1beta1.PodScale{},
 			expected: StateDiff{
 				AddList: pods,
 			},
 		},
 		{
-			description:     "add only pods without the corresponding containerscales",
-			pods:            pods,
-			containerscales: containerscales[2:],
+			description: "add only pods without the corresponding podscales",
+			pods:        pods,
+			podscales:   podscales[2:],
 			expected: StateDiff{
 				AddList: pods[:2],
 			},
 		},
 		{
-			description:     "delete containerscales if there are no pods",
-			pods:            []*corev1.Pod{},
-			containerscales: containerscales,
+			description: "delete podscales if there are no pods",
+			pods:        []*corev1.Pod{},
+			podscales:   podscales,
 			expected: StateDiff{
-				DeleteList: containerscales,
+				DeleteList: podscales,
 			},
 		},
 		{
-			description:     "delete containerscales if the corresponding pod no longer exists",
-			pods:            pods[2:],
-			containerscales: containerscales,
+			description: "delete podscales if the corresponding pod no longer exists",
+			pods:        pods[2:],
+			podscales:   podscales,
 			expected: StateDiff{
-				DeleteList: containerscales[:2],
+				DeleteList: podscales[:2],
 			},
 		},
 		{
-			description:     "statediff should be empty if pod and containerscales coincide",
-			pods:            pods,
-			containerscales: containerscales,
-			expected:        StateDiff{},
+			description: "statediff should be empty if pods and podscales coincide",
+			pods:        pods,
+			podscales:   podscales,
+			expected:    StateDiff{},
 		},
 		{
-			description:     "miscellanea test",
-			pods:            pods[1:],
-			containerscales: containerscales[:3],
+			description: "mixed add and delete test",
+			pods:        pods[1:],
+			podscales:   podscales[:3],
 			expected: StateDiff{
 				AddList:    pods[3:],
-				DeleteList: containerscales[:1],
+				DeleteList: podscales[:1],
 			},
 		},
 	}
 
 	for _, tt := range testcases {
 		t.Run(tt.description, func(t *testing.T) {
-			actual := DiffPods(tt.pods, tt.containerscales)
+			actual := DiffPods(tt.pods, tt.podscales)
 			require.Equal(t, tt.expected, actual, "StateDiff should coincide")
 		})
 	}