diff --git a/Makefile b/Makefile index fdcf5ce4b..3f226a860 100644 --- a/Makefile +++ b/Makefile @@ -12,19 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARCHS = amd64 arm64 -COMMONENVVAR=GOOS=$(shell uname -s | tr A-Z a-z) -BUILDENVVAR=CGO_ENABLED=0 +GO_VERSION := $(shell awk '/^go /{print $$2}' go.mod|head -n1) INTEGTESTENVVAR=SCHED_PLUGINS_TEST_VERBOSE=1 -# RELEASE_REGISTRY is the container registry to push +# Manage platform and builders +PLATFORMS ?= linux/amd64,linux/arm64,linux/s390x,linux/ppc64le +BUILDER ?= docker +ifeq ($(BUILDER),podman) + ALL_FLAG=--all +else + ALL_FLAG= +endif + +# REGISTRY is the container registry to push # into. The default is to push to the staging # registry, not production(registry.k8s.io). -RELEASE_REGISTRY?=gcr.io/k8s-staging-scheduler-plugins +REGISTRY?=gcr.io/k8s-staging-scheduler-plugins RELEASE_VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*") RELEASE_IMAGE:=kube-scheduler:$(RELEASE_VERSION) RELEASE_CONTROLLER_IMAGE:=controller:$(RELEASE_VERSION) -GO_BASE_IMAGE?=golang +GO_BASE_IMAGE?=golang:$(GO_VERSION) +DISTROLESS_BASE_IMAGE?=gcr.io/distroless/static:nonroot +EXTRA_ARGS="" # VERSION is the scheduler's version # @@ -40,77 +49,37 @@ all: build .PHONY: build build: build-controller build-scheduler -.PHONY: build.amd64 -build.amd64: build-controller.amd64 build-scheduler.amd64 - -.PHONY: build.arm64v8 -build.arm64v8: build-controller.arm64v8 build-scheduler.arm64v8 - .PHONY: build-controller build-controller: - $(COMMONENVVAR) $(BUILDENVVAR) go build -ldflags '-w' -o bin/controller cmd/controller/controller.go - -.PHONY: build-controller.amd64 -build-controller.amd64: - $(COMMONENVVAR) $(BUILDENVVAR) GOARCH=amd64 go build -ldflags '-w' -o bin/controller cmd/controller/controller.go - -.PHONY: build-controller.arm64v8 -build-controller.arm64v8: - $(COMMONENVVAR) $(BUILDENVVAR) GOARCH=arm64 go build -ldflags '-w' -o bin/controller cmd/controller/controller.go + $(GO_BUILD_ENV) go build -ldflags '-X k8s.io/component-base/version.gitVersion=$(VERSION) -w' -o bin/controller cmd/controller/controller.go .PHONY: build-scheduler build-scheduler: - $(COMMONENVVAR) $(BUILDENVVAR) go build -ldflags '-X k8s.io/component-base/version.gitVersion=$(VERSION) -w' -o bin/kube-scheduler cmd/scheduler/main.go - -.PHONY: build-scheduler.amd64 -build-scheduler.amd64: - $(COMMONENVVAR) $(BUILDENVVAR) GOARCH=amd64 go build -ldflags '-X k8s.io/component-base/version.gitVersion=$(VERSION) -w' -o bin/kube-scheduler cmd/scheduler/main.go + $(GO_BUILD_ENV) go build -ldflags '-X k8s.io/component-base/version.gitVersion=$(VERSION) -w' -o bin/kube-scheduler cmd/scheduler/main.go -.PHONY: build-scheduler.arm64v8 -build-scheduler.arm64v8: - $(COMMONENVVAR) $(BUILDENVVAR) GOARCH=arm64 go build -ldflags '-X k8s.io/component-base/version.gitVersion=$(VERSION) -w' -o bin/kube-scheduler cmd/scheduler/main.go - -.PHONY: local-image -local-image: clean - RELEASE_VERSION=$(RELEASE_VERSION) hack/build-images.sh - -.PHONY: release-image.amd64 -release-image.amd64: clean - ARCH="amd64" \ +.PHONY: build-images +build-images: + BUILDER=$(BUILDER) \ + PLATFORMS=$(PLATFORMS) \ RELEASE_VERSION=$(RELEASE_VERSION) \ - REGISTRY=$(RELEASE_REGISTRY) \ - IMAGE=$(RELEASE_IMAGE)-amd64 \ - CONTROLLER_IMAGE=$(RELEASE_CONTROLLER_IMAGE)-amd64 \ + REGISTRY=$(REGISTRY) \ + IMAGE=$(RELEASE_IMAGE) \ + CONTROLLER_IMAGE=$(RELEASE_CONTROLLER_IMAGE) \ GO_BASE_IMAGE=$(GO_BASE_IMAGE) \ - ALPINE_BASE_IMAGE=$(ALPINE_BASE_IMAGE) \ - 
hack/build-images.sh + DISTROLESS_BASE_IMAGE=$(DISTROLESS_BASE_IMAGE) \ + DOCKER_BUILDX_CMD=$(DOCKER_BUILDX_CMD) \ + EXTRA_ARGS=$(EXTRA_ARGS) hack/build-images.sh -.PHONY: release-image.arm64v8 -release-image.arm64v8: clean - ARCH="arm64" \ - RELEASE_VERSION=$(RELEASE_VERSION) \ - REGISTRY=$(RELEASE_REGISTRY) \ - IMAGE=$(RELEASE_IMAGE)-arm64 \ - CONTROLLER_IMAGE=$(RELEASE_CONTROLLER_IMAGE)-arm64 \ - GO_BASE_IMAGE=$(GO_BASE_IMAGE) \ - ALPINE_BASE_IMAGE=$(ALPINE_BASE_IMAGE) \ - hack/build-images.sh - -.PHONY: push-release-images -push-release-images: release-image.amd64 release-image.arm64v8 - gcloud auth configure-docker - for arch in $(ARCHS); do \ - docker push $(RELEASE_REGISTRY)/$(RELEASE_IMAGE)-$${arch} ;\ - docker push $(RELEASE_REGISTRY)/$(RELEASE_CONTROLLER_IMAGE)-$${arch} ;\ - done - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(RELEASE_REGISTRY)/$(RELEASE_IMAGE) $(addprefix --amend $(RELEASE_REGISTRY)/$(RELEASE_IMAGE)-, $(ARCHS)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(RELEASE_REGISTRY)/$(RELEASE_CONTROLLER_IMAGE) $(addprefix --amend $(RELEASE_REGISTRY)/$(RELEASE_CONTROLLER_IMAGE)-, $(ARCHS)) - for arch in $(ARCHS); do \ - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate --arch $${arch} $(RELEASE_REGISTRY)/$(RELEASE_IMAGE) $(RELEASE_REGISTRY)/$(RELEASE_IMAGE)-$${arch} ;\ - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate --arch $${arch} $(RELEASE_REGISTRY)/$(RELEASE_CONTROLLER_IMAGE) $(RELEASE_REGISTRY)/$(RELEASE_CONTROLLER_IMAGE)-$${arch} ;\ - done - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(RELEASE_REGISTRY)/$(RELEASE_IMAGE) ;\ - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(RELEASE_REGISTRY)/$(RELEASE_CONTROLLER_IMAGE) ;\ +.PHONY: local-image +local-image: PLATFORMS="linux/$$(uname -m)" +local-image: RELEASE_VERSION="v0.0.0" +local-image: REGISTRY="localhost:5000/scheduler-plugins" +local-image: EXTRA_ARGS="--load" +local-image: clean build-images + +.PHONY: release-images +push-images: EXTRA_ARGS="--push" +push-images: build-images .PHONY: update-vendor update-vendor: diff --git a/README.md b/README.md index d006568ec..5d0c5900d 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ The kube-scheduler binary includes the below list of plugins. They can be config * [Node Resources](pkg/noderesources/README.md) * [Node Resource Topology](pkg/noderesourcetopology/README.md) * [Preemption Toleration](pkg/preemptiontoleration/README.md) -* [Trimaran](pkg/trimaran/README.md) +* [Trimaran (Load-Aware Scheduling)](pkg/trimaran/README.md) * [Network-Aware Scheduling](pkg/networkaware/README.md) Additionally, the kube-scheduler binary includes the below list of sample plugins. These plugins are not intended for use in production @@ -57,34 +57,47 @@ on demand, (e.g., `v0.18.800`) are used to indicated that the k8s client package changed since the previous release, and that only scheduler plugins code (features or bug fixes) was changed. -| Scheduler Plugins | Compiled With k8s Version | Container Image | Arch | -|-------------------|---------------------------|-----------------------------------------------------------|----------------| -| v0.28.9 | v1.28.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 | AMD64
ARM64 | -| v0.27.8 | v1.27.8 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.27.8 | AMD64
ARM64 | -| v0.26.7 | v1.26.7 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.26.7 | AMD64
ARM64 | -| v0.25.12 | v1.25.12 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.25.12 | AMD64
ARM64 | -| v0.24.9 | v1.24.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.24.9 | AMD64
ARM64 | -| v0.23.10 | v1.23.10 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.23.10 | AMD64
ARM64 | -| v0.22.6 | v1.22.6 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.22.6 | AMD64
ARM64 | -| v0.21.6 | v1.21.6 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.21.6 | AMD64
ARM64 | -| v0.20.10 | v1.20.10 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.20.10 | AMD64
ARM64 | -| v0.19.9 | v1.19.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.19.9 | AMD64
ARM64 | -| v0.19.8 | v1.19.8 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.19.8 | AMD64
ARM64 | -| v0.18.9 | v1.18.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.18.9 | AMD64 | - -| Controller | Compiled With k8s Version | Container Image | Arch | -|------------|---------------------------|-------------------------------------------------------|----------------| -| v0.28.9 | v1.28.9 | registry.k8s.io/scheduler-plugins/controller:v0.28.9 | AMD64
ARM64 | -| v0.27.8 | v1.27.8 | registry.k8s.io/scheduler-plugins/controller:v0.27.8 | AMD64
ARM64 | -| v0.26.7 | v1.26.7 | registry.k8s.io/scheduler-plugins/controller:v0.26.7 | AMD64
ARM64 | -| v0.25.12 | v1.25.12 | registry.k8s.io/scheduler-plugins/controller:v0.25.12 | AMD64
ARM64 | -| v0.24.9 | v1.24.9 | registry.k8s.io/scheduler-plugins/controller:v0.24.9 | AMD64
ARM64 | -| v0.23.10 | v1.23.10 | registry.k8s.io/scheduler-plugins/controller:v0.23.10 | AMD64
ARM64 | -| v0.22.6 | v1.22.6 | registry.k8s.io/scheduler-plugins/controller:v0.22.6 | AMD64
ARM64 | -| v0.21.6 | v1.21.6 | registry.k8s.io/scheduler-plugins/controller:v0.21.6 | AMD64
ARM64 | -| v0.20.10 | v1.20.10 | registry.k8s.io/scheduler-plugins/controller:v0.20.10 | AMD64
ARM64 | -| v0.19.9 | v1.19.9 | registry.k8s.io/scheduler-plugins/controller:v0.19.9 | AMD64
ARM64 | -| v0.19.8 | v1.19.8 | registry.k8s.io/scheduler-plugins/controller:v0.19.8 | AMD64
ARM64 | +| Scheduler Plugins | Compiled With k8s Version | Container Image | Arch | +|-------------------|---------------------------|-----------------------------------------------------------|------------------------------------------------------------| +| v0.29.7 | v1.29.7 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 | linux/amd64
linux/arm64
linux/s390x
linux/ppc64le | +| v0.28.9 | v1.28.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 | linux/amd64
linux/arm64 | +| v0.27.8 | v1.27.8 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.27.8 | linux/amd64
linux/arm64 | + +| Controller | Compiled With k8s Version | Container Image | Arch | +|------------|---------------------------|-------------------------------------------------------|------------------------------------------------------------| +| v0.29.7 | v1.29.7 | registry.k8s.io/scheduler-plugins/controller:v0.29.7 | linux/amd64
linux/arm64
linux/s390x
linux/ppc64le | +| v0.28.9 | v1.28.9 | registry.k8s.io/scheduler-plugins/controller:v0.28.9 | linux/amd64
linux/arm64 | +| v0.27.8 | v1.27.8 | registry.k8s.io/scheduler-plugins/controller:v0.27.8 | linux/amd64
linux/arm64 | + +
+<summary>Older releases</summary> + +| Scheduler Plugins | Compiled With k8s Version | Container Image | Arch | +|-------------------|---------------------------|-----------------------------------------------------------|----------------------------| +| v0.26.7 | v1.26.7 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.26.7 | linux/amd64&#10;
linux/arm64 | +| v0.25.12 | v1.25.12 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.25.12 | linux/amd64
linux/arm64 | +| v0.24.9 | v1.24.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.24.9 | linux/amd64
linux/arm64 | +| v0.23.10 | v1.23.10 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.23.10 | linux/amd64
linux/arm64 | +| v0.22.6 | v1.22.6 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.22.6 | linux/amd64
linux/arm64 | +| v0.21.6 | v1.21.6 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.21.6 | linux/amd64
linux/arm64 | +| v0.20.10 | v1.20.10 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.20.10 | linux/amd64
linux/arm64 | +| v0.19.9 | v1.19.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.19.9 | linux/amd64
linux/arm64 | +| v0.19.8 | v1.19.8 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.19.8 | linux/amd64
linux/arm64 | +| v0.18.9 | v1.18.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.18.9 | linux/amd64 | + +| Controller | Compiled With k8s Version | Container Image | Arch | +|------------|---------------------------|-------------------------------------------------------|----------------------------| +| v0.26.7 | v1.26.7 | registry.k8s.io/scheduler-plugins/controller:v0.26.7 | linux/amd64
linux/arm64 | +| v0.25.12 | v1.25.12 | registry.k8s.io/scheduler-plugins/controller:v0.25.12 | linux/amd64
linux/arm64 | +| v0.24.9 | v1.24.9 | registry.k8s.io/scheduler-plugins/controller:v0.24.9 | linux/amd64
linux/arm64 | +| v0.23.10 | v1.23.10 | registry.k8s.io/scheduler-plugins/controller:v0.23.10 | linux/amd64
linux/arm64 | +| v0.22.6 | v1.22.6 | registry.k8s.io/scheduler-plugins/controller:v0.22.6 | linux/amd64
linux/arm64 | +| v0.21.6 | v1.21.6 | registry.k8s.io/scheduler-plugins/controller:v0.21.6 | linux/amd64
linux/arm64 | +| v0.20.10 | v1.20.10 | registry.k8s.io/scheduler-plugins/controller:v0.20.10 | linux/amd64
linux/arm64 | +| v0.19.9 | v1.19.9 | registry.k8s.io/scheduler-plugins/controller:v0.19.9 | linux/amd64
linux/arm64 | +| v0.19.8 | v1.19.8 | registry.k8s.io/scheduler-plugins/controller:v0.19.8 | linux/amd64
linux/arm64 | + +
## Community, discussion, contribution, and support diff --git a/RESYNC.log.md b/RESYNC.log.md index 2e892c5d1..0a495ae54 100644 --- a/RESYNC.log.md +++ b/RESYNC.log.md @@ -1,5 +1,6 @@ | Resync Date | Merge With Upstream Tag/Commit | Author | |-------------|------------------------------------------------------------------------------------------------------|-------------| +| 2024.09.26 | https://github.com/kubernetes-sigs/scheduler-plugins/commit/bb56af11184a0f6ed33e2fc8b189a5b1ccfc60e4 | ffromani | | 2024.06.24 | https://github.com/kubernetes-sigs/scheduler-plugins/commit/2c1c0cfe6134c5d55a23dae1726264664a943f4b | ffromani | | 2024.05.29 | https://github.com/kubernetes-sigs/scheduler-plugins/commit/0834feb92676712cebe8290615ce1c47537fe078 | ffromani | | 2024.05.07 | https://github.com/kubernetes-sigs/scheduler-plugins/commit/70981813a19f16c4202f6f74a2525bf917b63685 | ffromani | diff --git a/apis/config/v1/zz_generated.conversion.go b/apis/config/v1/zz_generated.conversion.go index e86d59784..68e7ecfd4 100644 --- a/apis/config/v1/zz_generated.conversion.go +++ b/apis/config/v1/zz_generated.conversion.go @@ -355,6 +355,7 @@ func autoConvert_v1_NodeResourceTopologyCache_To_config_NodeResourceTopologyCach out.ForeignPodsDetect = (*config.ForeignPodsDetectMode)(unsafe.Pointer(in.ForeignPodsDetect)) out.ResyncMethod = (*config.CacheResyncMethod)(unsafe.Pointer(in.ResyncMethod)) out.InformerMode = (*config.CacheInformerMode)(unsafe.Pointer(in.InformerMode)) + out.ResyncScope = (*config.CacheResyncScope)(unsafe.Pointer(in.ResyncScope)) return nil } @@ -367,6 +368,7 @@ func autoConvert_config_NodeResourceTopologyCache_To_v1_NodeResourceTopologyCach out.ForeignPodsDetect = (*ForeignPodsDetectMode)(unsafe.Pointer(in.ForeignPodsDetect)) out.ResyncMethod = (*CacheResyncMethod)(unsafe.Pointer(in.ResyncMethod)) out.InformerMode = (*CacheInformerMode)(unsafe.Pointer(in.InformerMode)) + out.ResyncScope = (*CacheResyncScope)(unsafe.Pointer(in.ResyncScope)) return nil } diff --git a/apis/config/v1/zz_generated.deepcopy.go b/apis/config/v1/zz_generated.deepcopy.go index e725ae2ab..5a5d55adc 100644 --- a/apis/config/v1/zz_generated.deepcopy.go +++ b/apis/config/v1/zz_generated.deepcopy.go @@ -225,6 +225,11 @@ func (in *NodeResourceTopologyCache) DeepCopyInto(out *NodeResourceTopologyCache *out = new(CacheInformerMode) **out = **in } + if in.ResyncScope != nil { + in, out := &in.ResyncScope, &out.ResyncScope + *out = new(CacheResyncScope) + **out = **in + } return } diff --git a/apis/config/zz_generated.deepcopy.go b/apis/config/zz_generated.deepcopy.go index c4ab8cd36..100ca8288 100644 --- a/apis/config/zz_generated.deepcopy.go +++ b/apis/config/zz_generated.deepcopy.go @@ -175,6 +175,11 @@ func (in *NodeResourceTopologyCache) DeepCopyInto(out *NodeResourceTopologyCache *out = new(CacheInformerMode) **out = **in } + if in.ResyncScope != nil { + in, out := &in.ResyncScope, &out.ResyncScope + *out = new(CacheResyncScope) + **out = **in + } return } diff --git a/apis/scheduling/v1alpha1/types.go b/apis/scheduling/v1alpha1/types.go index 40789a0b8..2fb6af295 100644 --- a/apis/scheduling/v1alpha1/types.go +++ b/apis/scheduling/v1alpha1/types.go @@ -146,6 +146,8 @@ type PodGroupSpec struct { // MinMember defines the minimal number of members/tasks to run the pod group; // if there's not enough resources to start all tasks, the scheduler // will not start any. 
+ // The minimum is 1 + // +kubebuilder:validation:Minimum=1 MinMember int32 `json:"minMember,omitempty"` // MinResources defines the minimal resource of members/tasks to run the pod group; diff --git a/build/controller/Dockerfile b/build/controller/Dockerfile index bd2b5c0ea..fb5ba45b0 100644 --- a/build/controller/Dockerfile +++ b/build/controller/Dockerfile @@ -11,19 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -ARG ARCH -ARG GO_BASE_IMAGE=golang -ARG ALPINE_BASE_IMAGE=$ARCH/alpine -FROM $GO_BASE_IMAGE:1.21 +ARG GO_BASE_IMAGE +ARG DISTROLESS_BASE_IMAGE=gcr.io/distroless/static:nonroot +FROM --platform=${BUILDPLATFORM} $GO_BASE_IMAGE AS builder -WORKDIR /go/src/sigs.k8s.io/scheduler-plugins +WORKDIR /workspace COPY . . -ARG ARCH -RUN make build-controller.$ARCH +ARG TARGETARCH +RUN make build-controller GO_BUILD_ENV='CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH}' -FROM $ALPINE_BASE_IMAGE:3.16 - -COPY --from=0 /go/src/sigs.k8s.io/scheduler-plugins/bin/controller /bin/controller +FROM --platform=${BUILDPLATFORM} $DISTROLESS_BASE_IMAGE WORKDIR /bin -CMD ["controller"] +COPY --from=builder /workspace/bin/controller . +USER 65532:65532 + +ENTRYPOINT ["/bin/controller"] diff --git a/build/scheduler/Dockerfile b/build/scheduler/Dockerfile index 40766c817..49e246d50 100644 --- a/build/scheduler/Dockerfile +++ b/build/scheduler/Dockerfile @@ -11,20 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -ARG ARCH -ARG GO_BASE_IMAGE=golang -ARG ALPINE_BASE_IMAGE=$ARCH/alpine -FROM $GO_BASE_IMAGE:1.21 +ARG GO_BASE_IMAGE +ARG DISTROLESS_BASE_IMAGE=gcr.io/distroless/static:nonroot +FROM --platform=${BUILDPLATFORM} $GO_BASE_IMAGE AS builder -WORKDIR /go/src/sigs.k8s.io/scheduler-plugins +WORKDIR /workspace COPY . . -ARG ARCH -ARG RELEASE_VERSION -RUN RELEASE_VERSION=${RELEASE_VERSION} make build-scheduler.$ARCH +ARG TARGETARCH +RUN make build-scheduler GO_BUILD_ENV='CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH}' -FROM $ALPINE_BASE_IMAGE:3.16 - -COPY --from=0 /go/src/sigs.k8s.io/scheduler-plugins/bin/kube-scheduler /bin/kube-scheduler +FROM --platform=${BUILDPLATFORM} $DISTROLESS_BASE_IMAGE WORKDIR /bin -CMD ["kube-scheduler"] +COPY --from=builder /workspace/bin/kube-scheduler . +USER 65532:65532 + +ENTRYPOINT ["/bin/kube-scheduler"] diff --git a/cloudbuild.yaml b/cloudbuild.yaml index 79ecc1b2f..57d7b75a7 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -1,20 +1,22 @@ # See https://cloud.google.com/cloud-build/docs/build-config # this must be specified in seconds. If omitted, defaults to 600s (10 mins) -timeout: 2400s +timeout: 3000s # this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF, # or any new substitutions added in the future. 
options: substitution_option: ALLOW_LOOSE + machineType: 'N1_HIGHCPU_8' steps: - - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90' + # see https://github.com/kubernetes/test-infra/tree/master/config/jobs/image-pushing + - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20240718-5ef92b5c36' entrypoint: make env: - - DOCKER_CLI_EXPERIMENTAL=enabled - RELEASE_VERSION=$_GIT_TAG - BASE_REF=$_PULL_BASE_REF + - DOCKER_BUILDX_CMD=/buildx-entrypoint args: - - push-release-images + - push-images substitutions: # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and # can be used as a substitution diff --git a/config/crd/bases/scheduling.x-k8s.io_podgroups.yaml b/config/crd/bases/scheduling.x-k8s.io_podgroups.yaml index de2c94b5b..d1e43e16b 100644 --- a/config/crd/bases/scheduling.x-k8s.io_podgroups.yaml +++ b/config/crd/bases/scheduling.x-k8s.io_podgroups.yaml @@ -74,7 +74,9 @@ spec: MinMember defines the minimal number of members/tasks to run the pod group; if there's not enough resources to start all tasks, the scheduler will not start any. + The minimum is 1 format: int32 + minimum: 1 type: integer minResources: additionalProperties: diff --git a/doc/develop.md b/doc/develop.md index d8cf288b5..1d35f3ca3 100644 --- a/doc/develop.md +++ b/doc/develop.md @@ -79,8 +79,7 @@ spec: containers: - image: localhost:5000/scheduler-plugins/kube-scheduler:latest imagePullPolicy: Never - command: - - /bin/kube-scheduler + args: - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf - --config=/etc/kubernetes/configs/scheduler-config.yaml diff --git a/doc/install.md b/doc/install.md index 9bf4be184..2baba39fc 100644 --- a/doc/install.md +++ b/doc/install.md @@ -4,7 +4,7 @@ - [Create a Kubernetes Cluster](#create-a-kubernetes-cluster) -- [Install release v0.28.9 and use Coscheduling](#install-release-v0289-and-use-coscheduling) +- [Install release v0.29.7 and use Coscheduling](#install-release-v0297-and-use-coscheduling) - [As a second scheduler](#as-a-second-scheduler) - [As a single scheduler (replacing the vanilla default-scheduler)](#as-a-single-scheduler-replacing-the-vanilla-default-scheduler) - [Test Coscheduling](#test-coscheduling) @@ -24,7 +24,7 @@ If you do not have a cluster yet, create one by using one of the following provi * [kubeadm](https://kubernetes.io/docs/reference/setup-tools/kubeadm/) * [minikube](https://minikube.sigs.k8s.io/) -## Install release v0.28.9 and use Coscheduling +## Install release v0.29.7 and use Coscheduling Note: we provide two ways to install the scheduler-plugin artifacts: as a second scheduler and as a single scheduler. Their pros and cons are as below: @@ -146,7 +146,7 @@ any vanilla Kubernetes scheduling capability. Instead, a lot of extra out-of-box - - --kubeconfig=/etc/kubernetes/scheduler.conf - - --leader-elect=true 19,20c20 - + image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 + + image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 --- - image: registry.k8s.io/kube-scheduler:v1.28.9 50,52d49 @@ -160,14 +160,14 @@ any vanilla Kubernetes scheduling capability. Instead, a lot of extra out-of-box + name: sched-cc ``` -1. Verify that kube-scheduler pod is running properly with a correct image: `registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9` +1. 
Verify that kube-scheduler pod is running properly with a correct image: `registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7` ```bash $ kubectl get pod -n kube-system | grep kube-scheduler kube-scheduler-kind-control-plane 1/1 Running 0 3m27s $ kubectl get pods -l component=kube-scheduler -n kube-system -o=jsonpath="{.items[0].spec.containers[0].image}{'\n'}" - registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 + registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 ``` > **⚠️Troubleshooting:** If the kube-scheudler is not up, you may need to restart kubelet service inside the kind control plane (`systemctl restart kubelet.service`) diff --git a/go.mod b/go.mod index a1f7419bf..511b624b2 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,13 @@ module sigs.k8s.io/scheduler-plugins -go 1.21 +go 1.22.0 require ( github.com/containers/common v0.46.0 github.com/diktyo-io/appgroup-api v1.0.1-alpha github.com/diktyo-io/networktopology-api v1.0.1-alpha github.com/dustin/go-humanize v1.0.1 - github.com/go-logr/logr v1.3.0 + github.com/go-logr/logr v1.4.1 github.com/google/go-cmp v0.6.0 github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.2 github.com/k8stopologyawareschedwg/podfingerprint v0.2.2 @@ -16,19 +16,19 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 gonum.org/v1/gonum v0.12.0 - k8s.io/api v0.29.4 - k8s.io/apimachinery v0.29.4 - k8s.io/apiserver v0.29.4 - k8s.io/client-go v0.29.4 - k8s.io/code-generator v0.29.4 - k8s.io/component-base v0.29.4 - k8s.io/component-helpers v0.29.4 + k8s.io/api v0.30.4 + k8s.io/apimachinery v0.30.4 + k8s.io/apiserver v0.30.4 + k8s.io/client-go v0.30.4 + k8s.io/code-generator v0.30.4 + k8s.io/component-base v0.30.4 + k8s.io/component-helpers v0.30.4 k8s.io/klog/hack/tools v0.0.0-20210917071902-331d2323a192 - k8s.io/klog/v2 v2.110.1 - k8s.io/kube-scheduler v0.29.4 - k8s.io/kubernetes v1.29.4 + k8s.io/klog/v2 v2.120.1 + k8s.io/kube-scheduler v0.30.4 + k8s.io/kubernetes v1.30.4 k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/controller-runtime v0.16.5 + sigs.k8s.io/controller-runtime v0.18.5 sigs.k8s.io/security-profiles-operator v0.4.0 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 sigs.k8s.io/yaml v1.3.0 @@ -51,7 +51,7 @@ require ( github.com/distribution/reference v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -62,7 +62,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.17.7 // indirect + github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.1 // indirect @@ -89,7 +89,7 @@ require ( github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cobra v1.7.0 // indirect @@ -107,18 
+107,18 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.25.0 // indirect + go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect - golang.org/x/mod v0.14.0 // indirect + golang.org/x/mod v0.15.0 // indirect golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sync v0.5.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.18.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect @@ -130,18 +130,18 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.29.4 // indirect - k8s.io/cloud-provider v0.29.4 // indirect - k8s.io/controller-manager v0.29.4 // indirect - k8s.io/csi-translation-lib v0.29.4 // indirect - k8s.io/dynamic-resource-allocation v0.29.4 // indirect - k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect - k8s.io/kms v0.29.4 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/kubelet v0.29.4 // indirect - k8s.io/metrics v0.29.4 // indirect - k8s.io/mount-utils v0.29.4 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect + k8s.io/apiextensions-apiserver v0.30.4 // indirect + k8s.io/cloud-provider v0.30.4 // indirect + k8s.io/controller-manager v0.30.4 // indirect + k8s.io/csi-translation-lib v0.30.4 // indirect + k8s.io/dynamic-resource-allocation v0.30.4 // indirect + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect + k8s.io/kms v0.30.4 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/kubelet v0.30.4 // indirect + k8s.io/metrics v0.30.4 // indirect + k8s.io/mount-utils v0.30.4 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/release-utils v0.3.0 // indirect ) @@ -153,33 +153,33 @@ replace ( go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.21.0 - k8s.io/api => k8s.io/api v0.29.4 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.29.4 - k8s.io/apiserver => k8s.io/apiserver v0.29.4 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.4 - k8s.io/client-go => k8s.io/client-go v0.29.4 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.4 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.4 - k8s.io/code-generator => k8s.io/code-generator v0.29.4 - k8s.io/component-base => k8s.io/component-base v0.29.4 - k8s.io/component-helpers => k8s.io/component-helpers v0.29.4 - k8s.io/controller-manager => k8s.io/controller-manager v0.29.4 - k8s.io/cri-api => k8s.io/cri-api v0.29.4 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.4 - 
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.4 - k8s.io/endpointslice => k8s.io/endpointslice v0.29.4 - k8s.io/kms => k8s.io/kms v0.29.4 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.4 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.4 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.4 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.4 - k8s.io/kubectl => k8s.io/kubectl v0.29.4 - k8s.io/kubelet => k8s.io/kubelet v0.29.4 - k8s.io/kubernetes => k8s.io/kubernetes v1.29.4 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.4 - k8s.io/metrics => k8s.io/metrics v0.29.4 - k8s.io/mount-utils => k8s.io/mount-utils v0.29.4 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.4 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.4 + k8s.io/api => k8s.io/api v0.30.4 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.30.4 + k8s.io/apimachinery => k8s.io/apimachinery v0.30.4 + k8s.io/apiserver => k8s.io/apiserver v0.30.4 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.30.4 + k8s.io/client-go => k8s.io/client-go v0.30.4 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.30.4 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.30.4 + k8s.io/code-generator => k8s.io/code-generator v0.30.4 + k8s.io/component-base => k8s.io/component-base v0.30.4 + k8s.io/component-helpers => k8s.io/component-helpers v0.30.4 + k8s.io/controller-manager => k8s.io/controller-manager v0.30.4 + k8s.io/cri-api => k8s.io/cri-api v0.30.4 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.30.4 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.30.4 + k8s.io/endpointslice => k8s.io/endpointslice v0.30.4 + k8s.io/kms => k8s.io/kms v0.30.4 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.30.4 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.30.4 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.30.4 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.30.4 + k8s.io/kubectl => k8s.io/kubectl v0.30.4 + k8s.io/kubelet => k8s.io/kubelet v0.30.4 + k8s.io/kubernetes => k8s.io/kubernetes v1.30.4 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.4 + k8s.io/metrics => k8s.io/metrics v0.30.4 + k8s.io/mount-utils => k8s.io/mount-utils v0.30.4 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.30.4 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.30.4 ) diff --git a/go.sum b/go.sum index 78a4f5870..64980b17a 100644 --- a/go.sum +++ b/go.sum @@ -772,22 +772,10 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= 
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= -github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -805,6 +793,7 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= @@ -827,7 +816,6 @@ github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdII github.com/ReneKroon/ttlcache/v2 v2.10.0/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= -github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/acobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249/go.mod h1:iU1PxQMQwoHZZWmMKrMkrNlY+3+p9vxIjpZOVyxWa0g= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= @@ -867,8 +855,6 @@ github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3A github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/aws/aws-sdk-go v1.37.6/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 
h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -1072,14 +1058,12 @@ github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr github.com/crossplane/crossplane-runtime v0.14.1-0.20210713194031-85b19c28ea88 h1:JntebucOC129rP7Lsd1lbvn1GBTOhZkzPkJOSJ96pXI= github.com/crossplane/crossplane-runtime v0.14.1-0.20210713194031-85b19c28ea88/go.mod h1:0sB8XOV2zy1GdZvSMY0/5QzKQJUiNSek08wbAYHJbws= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= -github.com/danwinship/knftables v0.0.13/go.mod h1:OzipaBQqkQAIbVnafTGyHgfFbjWTJecrA7/XNLNMO5E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -1096,13 +1080,12 @@ github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEg github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.27+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod 
h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -1114,7 +1097,6 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -1154,8 +1136,8 @@ github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -1175,6 +1157,7 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -1212,14 +1195,14 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= @@ -1243,7 +1226,6 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= @@ -1253,7 +1235,6 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= @@ -1311,9 +1292,9 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cadvisor v0.48.1/go.mod h1:ZkYbiiVdyoqBmI2ahZI8GlmirT78OAOER0z4EQugkxQ= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cadvisor v0.49.0/go.mod h1:s6Fqwb2KiWG6leCegVhw4KW40tf9f7m+SF1aXiE8Wsk= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -1619,7 
+1600,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -1664,8 +1644,11 @@ github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxe github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1692,8 +1675,11 @@ github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+q github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1710,8 +1696,7 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod 
h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= -github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1811,10 +1796,10 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rasky/go-xdr v0.0.0-20170217172119-4930550ba2e2/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -1826,7 +1811,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1971,10 +1955,9 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= -github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod 
h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/go-gitlab v0.43.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -2069,22 +2052,21 @@ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0H go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= @@ -2110,9 +2092,7 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.10.0/go.mod 
h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= @@ -2194,8 +2174,10 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2284,6 +2266,7 @@ golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= @@ -2326,8 +2309,9 @@ golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2347,8 +2331,10 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2500,6 +2486,7 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2638,8 +2625,10 @@ golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2967,6 +2956,7 @@ google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwS google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= @@ -3049,37 +3039,37 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= -k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= -k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk= -k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM= -k8s.io/apimachinery v0.29.4 
h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= -k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= -k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= -k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= -k8s.io/cli-runtime v0.29.4/go.mod h1:NmklYuZ4DLfOr2XEIT8Nzl883KMZUCv7KMj3wMHayCA= -k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= -k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= -k8s.io/cloud-provider v0.29.4 h1:XRKl818NKQWan4UZ7fXFAkEJLJkBFq5sQROrbLbryM4= -k8s.io/cloud-provider v0.29.4/go.mod h1:sC7wyt5z5IRoNuU9JhEuMdRnhDEz6cWAJCyMcNUH9X8= -k8s.io/cluster-bootstrap v0.29.4/go.mod h1:RmUZPo5l+ZwGYZqOA6+xPVPNwPziVQfw6IFT7AaMr0U= -k8s.io/code-generator v0.29.4 h1:8ESudFNbY5/9BzB8KOEFG2uV9Q0AQxkc4mrQESr30Ks= -k8s.io/code-generator v0.29.4/go.mod h1:7TYnI0dYItL2cKuhhgPSuF3WED9uMdELgbVXFfn/joE= -k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= -k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= -k8s.io/component-helpers v0.29.4 h1:lbVFhywtv64KlaIYTKszkHaFAqwCjNn7xyRTeWorzfI= -k8s.io/component-helpers v0.29.4/go.mod h1:rMOVMGYEju7/GKMV0USfYAYJBIQdxlMMN1VFl/Mf2so= -k8s.io/controller-manager v0.29.4 h1:rzEwLboRTXBZhYUY02nNhORHQlcXGDE3EPS2IZRd0cg= -k8s.io/controller-manager v0.29.4/go.mod h1:XG6oraSxieDl6XBdO2HnkA6DwEfoCKS3OCpqO4Xb0zU= -k8s.io/cri-api v0.29.4/go.mod h1:A6pdbjzML2xi9B0Clqn5qt1HJ3Ik12x2j+jv/TkqjRE= -k8s.io/csi-translation-lib v0.29.4 h1:ad0SlFsd0iB3PyXiAVVN4KxsTqgmPEHQyYGbJVQP1rA= -k8s.io/csi-translation-lib v0.29.4/go.mod h1:xdLMENgzc213O3qba2fWYPgBv3JiPqRfjsRanu2Te64= -k8s.io/dynamic-resource-allocation v0.29.4 h1:jfiZ44lCAWZrbQvhyJjWQe7eIIJt45OtepzAuK7rIZQ= -k8s.io/dynamic-resource-allocation v0.29.4/go.mod h1:e8zxuiMs+ojWL2KbJ2SquHUNaQ2vhiuLfobAFmWAeKw= -k8s.io/endpointslice v0.29.4/go.mod h1:+PAJDJAVnf7dIPds5c8RfqmD21XC/gn5JUMlVw4NVKc= +k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs= +k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0= +k8s.io/apiextensions-apiserver v0.30.4 h1:FwOMIk/rzZvM/Gx0IOz0+biZ+dlnlCeyfXW17uzV1qE= +k8s.io/apiextensions-apiserver v0.30.4/go.mod h1:m8cAkJ9PVU8Olb4cPW4hrUDBZGvoSJ0kY0G0CfdGQac= +k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY= +k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.4 h1:rHkGJhxd+m4jILrgkenwSmG4X0QXk6ecGuybzS/PQak= +k8s.io/apiserver v0.30.4/go.mod h1:oyGAj9B9/0+I9huJyf4/8SMBF2mNh2bTMlu7703dkH8= +k8s.io/cli-runtime v0.30.4/go.mod h1:O9Rf0F9x0zeMCcBQEIQxzd4zvxoqfhZZgDAet4hhBPs= +k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY= +k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc= +k8s.io/cloud-provider v0.30.4 h1:j5T/KePmxux289heU+aG+Aq3RmaGfzARAglWUkxTErE= +k8s.io/cloud-provider v0.30.4/go.mod h1:OfI8YUt8pCU8xvkN1dQ1pvJpQwNZlEszIY186v68H7A= +k8s.io/cluster-bootstrap v0.30.4/go.mod h1:Jh3Gp1uh/KC7YcCDkugs01Ld0dQJzWhzWs/GQ3q8Lk0= +k8s.io/code-generator v0.30.4 h1:1J2AcpPNBGh/NH9+m4TDh8Yj+mSbM+JyQhH0QdIMwmE= +k8s.io/code-generator v0.30.4/go.mod h1:Dd8gxOr5ieh9yHCLKnIkKDmk1H2glH8nYCAqwFogD2M= +k8s.io/component-base v0.30.4 h1:FlgKqazIkIIxpLA4wFXsiPiDllJn9fhsN3G4TeX7T7U= +k8s.io/component-base v0.30.4/go.mod h1:Qd3h+OJxV/LrnriXG/E15ZK83dzd306qJHW9+87S5ls= +k8s.io/component-helpers v0.30.4 h1:A4KYmrz12HZtGZ8TAnanl0SUx7n6tKduxzB3NHvinr0= +k8s.io/component-helpers 
v0.30.4/go.mod h1:h5D4gI8hGQXMHw90qJq41PRUJrn2dvFA3ElZFUTzRps= +k8s.io/controller-manager v0.30.4 h1:PdAGa5srv9fTECbBtWeaLshNpy//hGHHpXjRkh1wOkQ= +k8s.io/controller-manager v0.30.4/go.mod h1:fTVfW8X0yJh+pUuybc45WxyoLQEhJqJjSff6/2b+l3I= +k8s.io/cri-api v0.30.4/go.mod h1://4/umPJSW1ISNSNng4OwjpkvswJOQwU8rnkvO8P+xg= +k8s.io/csi-translation-lib v0.30.4 h1:CxcU8ovSYo267gsDDKdnSO/seSnH3J6dj/HWGcR/OyQ= +k8s.io/csi-translation-lib v0.30.4/go.mod h1:zOyhRMqMOTeWl7NXLMnAfBmgAooFUL51vL9GiyaZXVo= +k8s.io/dynamic-resource-allocation v0.30.4 h1:ixvGblhL97SCHQ3n3jRgyGmo0GQEDR1QKVZoNmzT7k0= +k8s.io/dynamic-resource-allocation v0.30.4/go.mod h1:00Gc/KBbg713RGf2ueWtssdwvgtL6egOCo6c+ixvNww= +k8s.io/endpointslice v0.30.4/go.mod h1:9iRogOOxizgQboz9LBIaCu74qB/q0tNnJeC440ceZUI= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog/hack/tools v0.0.0-20210917071902-331d2323a192 h1:u27Xm1of9MTDM1CZW3hg0Vv04ohywEG152G8mpr9n8Y= k8s.io/klog/hack/tools v0.0.0-20210917071902-331d2323a192/go.mod h1:DXW3Mv8xqJvjXWiBSBHrK2O4mq5LMD0clqkv3b1g9HA= @@ -3090,31 +3080,31 @@ k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kms v0.29.4 h1:cFGEoCLwoXk/eqYZppLZxybCdmEWeRKMCbm9f13IdRQ= -k8s.io/kms v0.29.4/go.mod h1:vWVImKkJd+1BQY4tBwdfSwjQBiLrnbNtHADcDEDQFtk= -k8s.io/kube-aggregator v0.29.4/go.mod h1:zBfe4iXXmw5HinNgN0JoAu5rpXdyCUvRfG99+FVOd68= -k8s.io/kube-controller-manager v0.29.4/go.mod h1:Pcuyqbg/YzWWL5E+yxgaHemAiHRM8oH96U/w3Qe8k0w= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kms v0.30.4 h1:Je7wR5/m+w/E7Ef9R9RY1yeMU/C2GXIvhzRFfg8H5kQ= +k8s.io/kms v0.30.4/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= +k8s.io/kube-aggregator v0.30.4/go.mod h1:w/ZLttTfMdPmEzV9k/9O2ENKI77ENpwIzeljQ5fj+qc= +k8s.io/kube-controller-manager v0.30.4/go.mod h1:8RRCcoUZP1FDzZwjSs7kQS1X0u8OHcDWfVO28fEeQzo= k8s.io/kube-openapi v0.0.0-20230109183929-3758b55a6596/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0= k8s.io/kube-openapi v0.0.0-20230531092745-9b4dcd38a4bf/go.mod h1:l8HTwL5fqnlns4jOveW1L75eo7R9KFHxiE0bsPGy428= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kube-proxy v0.29.4/go.mod h1:Mi+GF8HGGw984eI+/0BfrM5i2Vb8qaUh677CGkqj71g= -k8s.io/kube-scheduler v0.29.4 h1:6llyoMc7rC/b/l9mCZUPc9UMuwb4UP75D+Ue6SJ+vK0= -k8s.io/kube-scheduler v0.29.4/go.mod h1:9/Hr6P5D+uMywNwHOhJUXthVrGdlykZJvw6p6O/WyS0= 
-k8s.io/kubectl v0.29.4/go.mod h1:YTKRF9y1/ccqZ2bnpOWaJD8V7johKqZR/qOMq+0pfxU= -k8s.io/kubelet v0.29.4 h1:6fTt4sTd5xqTtIhVoS7PkiFUBevQsyu3ZmENVvwY62M= -k8s.io/kubelet v0.29.4/go.mod h1:lAu6Z17pxKwgM+9hsgGkqFjYTOhbc0dnZ6GNnlbjYW0= -k8s.io/kubernetes v1.29.4 h1:n4VCbX9cUhxHI+zw+m2iZlzT73/mrEJBHIMeauh9g4U= -k8s.io/kubernetes v1.29.4/go.mod h1:28sDhcb87LX5z3GWAKYmLrhrifxi4W9bEWua4DRTIvk= -k8s.io/legacy-cloud-providers v0.29.4/go.mod h1:pFhV4bUVrEJEznsvz+I8WhlZwgowOgKNGASVV1BR7b0= -k8s.io/metrics v0.29.4 h1:06sZ63/Kt9HEb5GP/1y6xbHDz6XkxnHpu949UdXfoXQ= -k8s.io/metrics v0.29.4/go.mod h1:ZN9peB0nLTqPZuwQna8ZUrPFJQ0i8QNH4pqRJopS+9c= -k8s.io/mount-utils v0.29.4 h1:tW/URea4gtXlaVW7VObr52NQhS+z3SXTg1GUaFZjRL4= -k8s.io/mount-utils v0.29.4/go.mod h1:SHUMR9n3b6tLgEmlyT36cL6fV6Sjwa5CJhc0guCXvb0= -k8s.io/pod-security-admission v0.29.4/go.mod h1:PNErt3eRnzVx2zxIdYmgk7vBos5Qm4c8U5QXKvXFfxQ= -k8s.io/sample-apiserver v0.29.4/go.mod h1:BSGXyh2pgrzNUGyzIbBl3Q+gxMbfcdBn0M327MiG1aY= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-proxy v0.30.4/go.mod h1:WfeZOgj8FAoVW2V5FoTyZsCDHhl0aF0YRgfp4GHtCpc= +k8s.io/kube-scheduler v0.30.4 h1:6H+NfNuJ4RvUdSD1dtnJBXuYGAp+ETGZaxFk4sLGPdU= +k8s.io/kube-scheduler v0.30.4/go.mod h1:7MMICziKySYQQ7rOFGBsSkn1PE3urPwsxlp6uwU+NhU= +k8s.io/kubectl v0.30.4/go.mod h1:4KnGCshO4fFxd/tncWcbKH3Nj9wtoFYwMYPj8CUnduE= +k8s.io/kubelet v0.30.4 h1:2TP59RVxuWuKpD58gQ6qow1Oy2Ys2uOH4hfSD/qv5EQ= +k8s.io/kubelet v0.30.4/go.mod h1:v0lRl+1y2NNId5OlFiJ1rhjXc9D8Tp7PqvQYJS7W/L0= +k8s.io/kubernetes v1.30.4 h1:LfWX7JNmT9Hp8uFVHsB9gQCZesjcWTQ02PHwMz6dGqk= +k8s.io/kubernetes v1.30.4/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0= +k8s.io/legacy-cloud-providers v0.30.4/go.mod h1:Wbpfm7Ul3SxIs0B1U30OMtb4Vy6ZicrOg3hgTo/tEis= +k8s.io/metrics v0.30.4 h1:CI7Df+c2G65X9B/e6BxK8c1MqmjXRj3Fdd6VEN4N7YQ= +k8s.io/metrics v0.30.4/go.mod h1:3zPvjwIFSjjwdWIUwEaTO+/ugUpzFCV+l3jISZ+/x3A= +k8s.io/mount-utils v0.30.4 h1:48vAsFQNwSpFJ8a3+G1PoxEn5OmzS+5AajZjQwLY8t4= +k8s.io/mount-utils v0.30.4/go.mod h1:9sCVmwGLcV1MPvbZ+rToMDnl1QcGozy+jBPd0MsQLIo= +k8s.io/pod-security-admission v0.30.4/go.mod h1:FL0x1BxYxKWoEZMqAw+HI0gj1TiCiJqQD9J/9rONkDI= +k8s.io/sample-apiserver v0.30.4/go.mod h1:Q9n3oQqbN7X/Xnhaa2qBKoMu8ZU+nt5tJjBNIIY7xjU= k8s.io/system-validators v1.8.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -3181,16 +3171,17 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= sigs.k8s.io/controller-runtime v0.9.2/go.mod 
h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= sigs.k8s.io/controller-runtime v0.10.3/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= -sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw= -sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk= +sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= sigs.k8s.io/controller-tools v0.7.0/go.mod h1:bpBAo0VcSDDLuWt47evLhMLPxRPxMDInTEH/YbdeMK0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/knftables v0.0.14/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk= sigs.k8s.io/kustomize/api v0.13.4/go.mod h1:Bkaavz5RKK6ZzP0zgPrB7QbpbBJKiHuD3BB0KujY7Ls= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/cmd/config v0.11.2/go.mod h1:PCpHxyu10daTnbMfn3xhH1vppn7L8jsS3qpRKXb7Lkc= diff --git a/hack/build-images.sh b/hack/build-images.sh index 9b2588e01..9213ffe3a 100755 --- a/hack/build-images.sh +++ b/hack/build-images.sh @@ -23,38 +23,49 @@ SCRIPT_ROOT=$(realpath $(dirname "${BASH_SOURCE[@]}")/..) SCHEDULER_DIR="${SCRIPT_ROOT}"/build/scheduler CONTROLLER_DIR="${SCRIPT_ROOT}"/build/controller -REGISTRY=${REGISTRY:-"localhost:5000/scheduler-plugins"} -IMAGE=${IMAGE:-"kube-scheduler:latest"} -CONTROLLER_IMAGE=${CONTROLLER_IMAGE:-"controller:latest"} -RELEASE_VERSION=${RELEASE_VERSION:-"v0.0.0"} - -BUILDER=${BUILDER:-"docker"} +# -t is the Docker engine default +TAG_FLAG="-t" +# If docker is not present, fall back to nerdctl +# TODO: nerdctl doesn't seem to have buildx. if ! command -v ${BUILDER} && command -v nerdctl >/dev/null; then BUILDER=nerdctl fi -ARCH=${ARCH:-$(go env GOARCH)} -if [[ "${ARCH}" == "arm64" ]]; then - ARCH="arm64v8" +# podman needs the manifest flag in order to create a single image. +if [[ "${BUILDER}" == "podman" ]]; then + TAG_FLAG="--manifest" fi -GO_BASE_IMAGE=${GO_BASE_IMAGE:-"golang"} -ALPINE_BASE_IMAGE=${ALPINE_BASE_IMAGE:-"$ARCH/alpine"} - cd "${SCRIPT_ROOT}" +IMAGE_BUILD_CMD=${DOCKER_BUILDX_CMD:-${BUILDER} buildx} + +# use RELEASE_VERSION==v0.0.0 to tell if it's a local image build. +BLD_INSTANCE="" +if [[ "${RELEASE_VERSION}" == "v0.0.0" ]]; then + BLD_INSTANCE=$($IMAGE_BUILD_CMD create --use) +fi + +# DOCKER_BUILDX_CMD is an env variable set in CI (valued as "/buildx-entrypoint") +# If it's set, use it; otherwise use "$BUILDER buildx" +${IMAGE_BUILD_CMD} build \ + --platform=${PLATFORMS} \ + -f ${SCHEDULER_DIR}/Dockerfile \ + --build-arg RELEASE_VERSION=${RELEASE_VERSION} \ + --build-arg GO_BASE_IMAGE=${GO_BASE_IMAGE} \ + --build-arg DISTROLESS_BASE_IMAGE=${DISTROLESS_BASE_IMAGE} \ + --build-arg CGO_ENABLED=0 \ + ${EXTRA_ARGS:-} ${TAG_FLAG:-} ${REGISTRY}/${IMAGE} . + +${IMAGE_BUILD_CMD} build \ + --platform=${PLATFORMS} \ + -f ${CONTROLLER_DIR}/Dockerfile \ + --build-arg RELEASE_VERSION=${RELEASE_VERSION} \ + --build-arg GO_BASE_IMAGE=${GO_BASE_IMAGE} \ + --build-arg DISTROLESS_BASE_IMAGE=${DISTROLESS_BASE_IMAGE} \ + --build-arg CGO_ENABLED=0 \ + ${EXTRA_ARGS:-} ${TAG_FLAG:-} ${REGISTRY}/${CONTROLLER_IMAGE} . 
-${BUILDER} build \ - -f ${SCHEDULER_DIR}/Dockerfile \ - --build-arg ARCH=${ARCH} \ - --build-arg RELEASE_VERSION=${RELEASE_VERSION} \ - --build-arg GO_BASE_IMAGE=${GO_BASE_IMAGE} \ - --build-arg ALPINE_BASE_IMAGE=${ALPINE_BASE_IMAGE} \ - -t ${REGISTRY}/${IMAGE} . -${BUILDER} build \ - -f ${CONTROLLER_DIR}/Dockerfile \ - --build-arg ARCH=${ARCH} \ - --build-arg RELEASE_VERSION=${RELEASE_VERSION} \ - --build-arg GO_BASE_IMAGE=${GO_BASE_IMAGE} \ - --build-arg ALPINE_BASE_IMAGE=${ALPINE_BASE_IMAGE} \ - -t ${REGISTRY}/${CONTROLLER_IMAGE} . +if [[ ! -z $BLD_INSTANCE ]]; then + ${DOCKER_BUILDX_CMD:-${BUILDER} buildx} rm $BLD_INSTANCE +fi \ No newline at end of file diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 02032354c..42b295c0d 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -36,17 +36,16 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code- source "${CODEGEN_PKG}/kube_codegen.sh" kube::codegen::gen_helpers \ - --input-pkg-root sigs.k8s.io/scheduler-plugins/apis \ - --output-base "../../" \ - --boilerplate "${SCRIPT_ROOT}/hack/boilerplate/boilerplate.generatego.txt" + --boilerplate "${SCRIPT_ROOT}/hack/boilerplate/boilerplate.generatego.txt" \ + "${SCRIPT_ROOT}/apis" kube::codegen::gen_client \ --with-watch \ --with-applyconfig \ - --input-pkg-root sigs.k8s.io/scheduler-plugins/apis \ - --output-base "../../" \ - --output-pkg-root sigs.k8s.io/scheduler-plugins/pkg/generated \ - --boilerplate "${SCRIPT_ROOT}/hack/boilerplate/boilerplate.generatego.txt" + --output-dir "${SCRIPT_ROOT}/pkg/generated" \ + --output-pkg sigs.k8s.io/scheduler-plugins/pkg/generated \ + --boilerplate "${SCRIPT_ROOT}/hack/boilerplate/boilerplate.generatego.txt" \ + "${SCRIPT_ROOT}/apis" ${CONTROLLER_GEN} object:headerFile="hack/boilerplate/boilerplate.generatego.txt" \ paths="./apis/scheduling/..." 
diff --git a/kep/tbd-peaks-power-aware-scheduling/README.md b/kep/tbd-peaks-power-aware-scheduling/README.md new file mode 100644 index 000000000..5f3b8fcd1 --- /dev/null +++ b/kep/tbd-peaks-power-aware-scheduling/README.md @@ -0,0 +1,330 @@ + +# KEP-TBD: Power Efficiency Aware Kubernetes Scheduling (PEAKS) + +## Table of Contents + + +- [Release Signoff Checklist](#release-signoff-checklist) +- [Summary](#summary) +- [Motivation](#motivation) + - [Goals](#goals) + - [Non Goals](#non-goals) +- [Proposal](#proposal) + - [User Stories](#user-stories) + - [Story 1](#story-1) + - [Story 2](#story-2) + - [Notes/Constraints/Caveats](#notesconstraintscaveats) + - [Risks and Mitigations](#risks-and-mitigations) +- [Design Details](#design-details) + - [PEAKS workflow](#peaks-workflow) + - [Pre-requisite modules](#pre-requisite-modules) + - [Use-cases (that save energy using PEAKS plugin)](#use-cases-that-save-energy-using-peaks-plugin) + - [Test Plan](#test-plan) + - [Graduation Criteria](#graduation-criteria) + - [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy) + - [Version Skew Strategy](#version-skew-strategy) +- [Production Readiness Review Questionnaire](#production-readiness-review-questionnaire) + - [Feature Enablement and Rollback](#feature-enablement-and-rollback) + - [Rollout, Upgrade and Rollback Planning](#rollout-upgrade-and-rollback-planning) + - [Monitoring Requirements](#monitoring-requirements) + - [Dependencies](#dependencies) + - [Scalability](#scalability) + - [Troubleshooting](#troubleshooting) +- [Implementation History](#implementation-history) +- [Drawbacks](#drawbacks) +- [Alternatives](#alternatives) +- [Infrastructure Needed (Optional)](#infrastructure-needed-optional) + + +## Release Signoff Checklist + +Items marked with (R) are required *prior to targeting to a milestone / release*. 
+ +- [ ] (R) Enhancement issue in release milestone, which links to KEP dir in [kubernetes/enhancements] (not the initial KEP PR) +- [ ] (R) KEP approvers have approved the KEP status as `implementable` +- [ ] (R) Design details are appropriately documented +- [ ] (R) Test plan is in place, giving consideration to SIG Architecture and SIG Testing input (including test refactors) + - [ ] e2e Tests for all Beta API Operations (endpoints) + - [ ] (R) Ensure GA e2e tests meet requirements for [Conformance Tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md) + - [ ] (R) Minimum Two Week Window for GA e2e tests to prove flake free +- [ ] (R) Graduation criteria is in place + - [ ] (R) [all GA Endpoints](https://github.com/kubernetes/community/pull/1806) must be hit by [Conformance Tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md) +- [ ] (R) Production readiness review completed +- [ ] (R) Production readiness review approved +- [ ] "Implementation History" section is up-to-date for milestone +- [ ] User-facing documentation has been created in [kubernetes/website], for publication to [kubernetes.io] +- [ ] Supporting documentation—e.g., additional design documents, links to mailing list discussions/SIG meetings, relevant PRs/issues, release notes + +[kubernetes.io]: https://kubernetes.io/ +[kubernetes/enhancements]: https://git.k8s.io/enhancements +[kubernetes/kubernetes]: https://git.k8s.io/kubernetes +[kubernetes/website]: https://git.k8s.io/website + +## Summary + +PEAKS (Power Efficiency Aware Kubernetes Scheduler) is a Kubernetes Scheduler plugin that aims to optimize the aggregate power consumption of the entire cluster during scheduling. It uses pre-trained Machine Learning models correlating Node Utilization with Power Consumption to predict the most suitable nodes for scheduling workloads. These predictions are based on the resource needs of incoming workloads and the real-time utilization of nodes within the cluster. + +## Motivation + +The Kubernetes scheduler framework supports multiple plugins, enabling the scheduling of pods on nodes to optimize various objectives. However, the ecosystem lacks a solution specifically designed to optimize power efficiency while simultaneously fulfilling other scheduling goals. + +A new plugin addresses this gap by incorporating power efficiency as a scheduling criterion alongside other objectives, such as optimizing resource allocation or adhering to specific topology requirements. + +### Goals + +1. Provide a configurable scheduling plugin to minimize the aggregate power consumption of the cluster. +2. Implement the aforementioned features as Score plugins. +3. Avoid altering the behavior of default score plugins unless necessary. + +### Non Goals + +1. De-scheduling resulting from unexpected outcomes (such as hot nodes or fragmentation) due to past scoring by plugins is not addressed in the initial design. +2. Memory, network, and disk utilization are not considered in the initial design. +3. The migration of already scheduled pods running on less power-efficient nodes to more power-efficient nodes is out of scope in the initial design. +4. Shutting down nodes to optimize power by migrating already running pods to other cluster nodes is out of scope in the initial design.
+ +## Proposal + +### User Stories + +#### Story 1 + +With 'heterogeneous nodes' (variations in CPU architectures, a mixed cluster consisting of both VM and Bare Metal nodes, and dissimilar allocation of resources across nodes), cluster owners can reap benefits, as the power efficiency of different nodes may vary. This often necessitates the use of customized power models for each node within the cluster. + +#### Story 2 + +Even with 'homogeneous nodes,' if the CPU vs. Power relationship is non-linear (such as piece-wise linear or concave), cluster owners can still benefit. Although the power efficiency might be consistent across all nodes, the varying CPU utilizations among nodes at any given time can significantly impact the aggregate power consumption of the cluster. Placing an incoming pod on one node versus another can influence this consumption. + +### Notes/Constraints/Caveats + +- The [Min, Max] score range for the plugin is user-configurable, defaulting to [0, 100]. +- Additionally, the plugin normalizes the generated scores within the supplied [Min, Max] range. +- There isn't a singular model that comprehensively depicts the correlation between utilization and power consumption for every cluster node. +- At any given time, the current utilization of a node can be identified through metrics data. +- Determining the current power consumption can be achieved either through live metrics or by inference from the model that describes the relationship between node utilization and power consumption. +- Retraining the model representing this relationship may be necessary to better align with the workloads running on the node. + +### Risks and Mitigations + +- The benchmark test below captures the overhead resulting from model inferencing latency, which appears to be negligible. +- The accuracy of the model that captures the relationship between utilization and power relies heavily on the quality of the metrics used for its training. The current implementation supports integration with various user-selected metrics providers, such as node-metrics and Kepler. +- The PEAKS plugin requires the power model parameters for each cluster node configured with it. However, the power model (utilization vs. power consumption) parameters are subject to change over time (e.g., an increase in the number of CPUs on a node results in a change in the power model). To address this, a `configmap` is used as a mechanism to pass the updated power model parameters of any cluster node seamlessly. + +## Design Details + +### PEAKS workflow +![PEAKS workflow](./figs/PEAKS_workflow.png) + +Here is a brief description of some of the steps in the above workflow: +- Create a power model for each cluster node (or, a power model for each nodepool if nodepools can be created): If a power model suitable for the workload to be scheduled already exists, then the same can be used to avoid creating a new power model. Thus, this is an optional step. + - The PEAKS plugin allows cluster owners to bring their own power models that best represent both the node behavior and the workload characteristics. + - Training the power model is outside the scope of the PEAKS plugin. + - The PEAKS plugin only performs power model inferencing. + - Here is an example that models the "CPU utilization vs. Power" relationship for a two node cluster.
+ - This power model changes when either the node configuration or the workload characteristics change. ![Power-models-of-nodes](./figs/PowerModelsOfNodes.png) + - The power model is typically a mathematical expression (e.g., `NodePower = K0 + K1 * e^(K2 * x)`, where `x` is node utilization and each `K` is a constant). + - Training the power model implies learning the values of the parameters of the corresponding mathematical expression (e.g., learning the values of `K0`, `K1` and `K2` above). + - Inferencing the power model implies computing the value of the expression representing the node power by supplying the value of the node utilization (e.g., evaluating the function `NodePower` by supplying the value of the parameter `x`). + - The Kubernetes API object `ConfigMap` can be used as a mechanism to pass the power model parameters of any cluster node seamlessly. + - Power model inferencing is part of the function that implements the [ScorePlugin](https://github.com/kubernetes/kubernetes/blob/master/pkg/scheduler/framework/interface.go#L612) interface function `Score` of the PEAKS plugin (a minimal sketch of this scoring flow is shown at the end of this section). +- Get the resource need for a pod to be scheduled: This step requires estimating the pod's resource needs. + - Consider pod resource requirements (requests/limits) from its specification as an alternative. + - If there are one or more running pods with the same image (case of auto scaling), consider the average load or resource need across those pods. +- Find the change in the utilization if the pod is to be scheduled on a node: This step requires the current utilization and the updated utilization after the pod placement on a node (so that the instantaneous power at both these utilizations can be found from the power model later). + - Current node utilization can be queried from Prometheus (with the metric source provider being a service like node-exporter or load-watcher). + - Add the change in utilization (corresponding to the pod resource requirement) to the current utilization to get the updated utilization. +- Return the change in the node's instantaneous power: + - The power consumption at the current utilization can be found either from the node metrics or by referring to the power model. + - The power consumption at the updated utilization can be found by referring to the power model. +- Normalize plugin score: The scores provided by different score plugins (which are in the range [framework.MinNodeScore, framework.MaxNodeScore]) are normalized to the range [0, 100] by the PEAKS plugin implementation of the `ScoreExtensions` interface function [NormalizeScore](https://github.com/kubernetes/kubernetes/blob/master/pkg/scheduler/framework/interface.go#L602). +- Choose a suitable weight for the PEAKS plugin in the kube-scheduler configuration: This is specific to the environment configuration. + - If the K8s cluster nodes are running on non-renewable energy sources, then a high weight for the PEAKS plugin can result in reduced CO2 emissions. + +### Pre-requisite modules +- Prometheus + - For energy metrics collection +- Load-watcher + - For collecting node usage metrics exported after processing metrics received from the source +- Node-exporter + - Source of node usage metrics +- Kepler + - Source of energy consumption metrics + +### Use-cases (that save energy using PEAKS plugin) +The use-case scenarios below (following a short illustrative sketch of the scoring flow) demonstrate energy savings using the PEAKS scheduler plugin over the default kube-scheduler.
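+Before the individual use-cases, here is the minimal sketch of the scoring flow referenced in the workflow above. This is a hypothetical Go illustration, not the actual implementation: the `PowerModel` type, the helper fields used to look up node utilization and the pod's utilization delta, the scaling factor, and the plugin name are all assumptions made for this sketch.
+
+```go
+// Package peaks sketches how exponential power-model inference could plug
+// into the scheduler framework's Score/NormalizeScore hooks. Illustrative only.
+package peaks
+
+import (
+    "context"
+    "math"
+
+    v1 "k8s.io/api/core/v1"
+    "k8s.io/kubernetes/pkg/scheduler/framework"
+)
+
+// PowerModel holds per-node parameters of the model
+// NodePower(x) = K0 + K1 * e^(K2 * x), where x is node utilization in [0, 1].
+// In the design above these parameters would be delivered via a ConfigMap.
+type PowerModel struct {
+    K0, K1, K2 float64
+}
+
+// Power evaluates the model at a given utilization.
+func (m PowerModel) Power(x float64) float64 {
+    return m.K0 + m.K1*math.Exp(m.K2*x)
+}
+
+// Peaks is a Score plugin sketch. Fetching live utilization (e.g. via
+// load-watcher/Prometheus) and loading per-node models are elided behind
+// the assumed fields below.
+type Peaks struct {
+    models      map[string]PowerModel                      // power model per node
+    utilization func(nodeName string) float64              // current utilization in [0, 1]
+    podDelta    func(pod *v1.Pod, nodeName string) float64 // extra utilization the pod would add
+}
+
+const Name = "Peaks"
+
+var _ framework.ScorePlugin = &Peaks{}
+
+func (p *Peaks) Name() string { return Name }
+
+// Score returns the predicted increase in the node's instantaneous power if
+// the pod were placed on it. Lower is better; NormalizeScore inverts it.
+func (p *Peaks) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
+    model, ok := p.models[nodeName]
+    if !ok {
+        return 0, framework.NewStatus(framework.Error, "no power model for node "+nodeName)
+    }
+    cur := p.utilization(nodeName)
+    next := cur + p.podDelta(pod, nodeName)
+    deltaPower := model.Power(next) - model.Power(cur)
+    return int64(deltaPower * 1000), nil // scaled raw value; normalized below
+}
+
+func (p *Peaks) ScoreExtensions() framework.ScoreExtensions { return p }
+
+// NormalizeScore maps the raw power deltas onto [MinNodeScore, MaxNodeScore]
+// so that the node with the smallest predicted power increase scores highest.
+func (p *Peaks) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
+    var lo, hi int64 = math.MaxInt64, math.MinInt64
+    for _, s := range scores {
+        if s.Score < lo {
+            lo = s.Score
+        }
+        if s.Score > hi {
+            hi = s.Score
+        }
+    }
+    for i := range scores {
+        if hi == lo {
+            scores[i].Score = framework.MaxNodeScore
+            continue
+        }
+        // Invert: lowest extra power -> MaxNodeScore, highest -> MinNodeScore.
+        scores[i].Score = framework.MaxNodeScore - (scores[i].Score-lo)*(framework.MaxNodeScore-framework.MinNodeScore)/(hi-lo)
+    }
+    return nil
+}
+```
+
+The design choice captured in `NormalizeScore` is that the plugin scores the increase in instantaneous power and then inverts it during normalization, so the node whose power draw would rise the least ends up with the highest normalized score.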
+- Deployment of a Pod (via kube-scheduler): + - On a K8s cluster with heterogeneous node configurations (resulting in the energy efficiency of the nodes not being the same), while deploying a pod, the `kube-scheduler` prefers energy efficient nodes for pod placement when using the PEAKS scheduler plugin, whereas it randomly selects a node for pod placement when using the default scheduler plugin. + - Since energy savings decrease with the increased use of energy inefficient nodes, the PEAKS plugin favours pod placement on energy efficient nodes over pod placement on energy inefficient nodes. + - The graph below demonstrates the energy saving while kube-scheduler deploys a pod (in the graph, `n1` refers to the `Node 1` introduced above which is part of the two node cluster; this is the more energy efficient node `tantawi1`. Similarly, `n2` refers to the `Node 2` introduced above which is part of the two node cluster; this is the less energy efficient node `tantawi2`). ![PEAKS with Kube-scheduler](./figs/PEAKS_with_Kube-scheduler.png) + - The graph on the left depicts pod placement on the less energy efficient node, which was randomly selected by the default kube-scheduler. + - The graph in the middle depicts pod placement on the more energy efficient node, which was the preferred choice of the PEAKS scheduler plugin. + - The graph on the right depicts how the savings in energy consumption across the cluster nodes over time (i.e., the difference in the aggregate cluster energy consumed under PEAKS plugin placement vs. default scheduler placement) increase as the pods continue execution. + - The energy savings will grow when the node utilizations increase and/or the pods run for a longer duration (node utilizations were ~12% during this experiment, which ran for 10 minutes). +- Scaling of a Pod: + - On a K8s cluster with heterogeneous node configurations (resulting in the energy efficiency of the nodes not being the same), the K8s scheduler using the PEAKS plugin packs the pods onto nodes, whereas the `default kube-scheduler` spreads the pods across nodes. + - Since energy savings decrease with the increased use of energy inefficient nodes, the PEAKS plugin favours pod placement on energy efficient nodes over pod placement on energy inefficient nodes. + - Two experiments were conducted: + - using the HPA controller, and + - using the `kubectl scale` command. + - The graph below demonstrates the energy saving while auto-scaling pods with the HPA controller. ![PEAKS with HPA](./figs/PEAKS_with_HPA.png) + - The graph on the left depicts spreading of pods on nodes using the default kube-scheduler. + - The graph in the middle depicts packing of pods on energy efficient nodes before pod placement on (relatively) energy inefficient cluster nodes using the PEAKS scheduler plugin. + - The graph on the right depicts the savings in energy across the cluster nodes over time (i.e., the difference in the aggregate cluster energy consumed under PEAKS plugin placement vs. default scheduler placement) as the pods scale up. + - There are ~10% energy savings observed at the end of 15 minutes of workload execution in this experiment. + - Note that the difference in the aggregate energy consumption of the cluster nodes reduces (highlighted with a red circle) as the PEAKS plugin places pods on energy inefficient nodes. + - The `kubectl scale` command allows resizing of a deployment, replica set, replication controller, or stateful set. + - Here is an illustration of how the `kubectl scale` command was used in the experiments to realize energy savings for all these types of K8s objects using the PEAKS plugin.
+ ![Kubectl-scale_Default](./figs/Kubectl-scale_Default.png) + ![Kubectl-scale_PEAKS](./figs/Kubectl-scale_PEAKS.png) + - While `kubectl scale` can also be used to decrease the number of pods, the scheduler (PEAKS plugin) doesn't have any role to play in that case. Hence, pods running on more energy efficient nodes may get evicted. One may use pod priority and preemption to evict pods from less energy efficient nodes as a workaround. Or, it may be possible to configure the [Descheduler](https://github.com/kubernetes-sigs/descheduler) to evict pods from less energy efficient nodes. +- Migration of a Pod (via explicit eviction) + - An application's pods might be placed on energy inefficient cluster nodes at the time of scheduling because other applications might be running on the energy efficient nodes. When one or more of those applications complete, resources become available on the energy efficient cluster nodes. Two options are available at this stage: + - Either the application pods continue to run on the nodes on which they were originally placed. + - Or, those pods may be migrated to energy efficient nodes to save energy. + - Periodically check the resource availability on energy efficient nodes via a script. When resources are available, the same script deletes the application pods running on energy inefficient nodes so that the scheduler (configured with the PEAKS plugin) places them on the best possible energy efficient nodes. + - This mimics auto-migration of application pods from energy inefficient nodes to energy efficient nodes to save energy consumption. + - Identification of the target node for the migration of a pod from an energy inefficient node is carried out by the PEAKS plugin. + - A pod might need to be deleted more than once before the scheduler finds the best energy efficient node for its placement. + - The experiment below demonstrates this use-case. + - The pods of a deployment "cpu-stress-test" were initially placed on the energy efficient node "tantawi1". As a result, only one pod of the deployment "php-apache" could be placed on the node "tantawi1" and its second pod was placed on the energy inefficient node "tantawi2". ![Pod-migration-stage-1](./figs/Migration_InitialPlacement.png) + - Even after the completion of the deployment "cpu-stress-test", a pod of the deployment "php-apache" continues to run on "tantawi2". ![Pod-migration-stage-2](./figs/Migration_IntermediatePlacement.png) + - Deleting the pod from "tantawi2" triggers the creation of a new pod by the deployment to meet the desired number of replicas, and the scheduler now places the pod on the energy efficient node "tantawi1" (in contrast, the default kube-scheduler would place the pod on the same node "tantawi2" as it spreads the pods across cluster nodes). ![Pod-migration-stage-3](./figs/Migration_FinalPlacement.png) +- Shutting down of a cluster node (via Cluster Autoscaler) + - Pod migration to a more energy efficient cluster node (demonstrated in the previous use-case) reduces the active power consumption on the less energy efficient nodes. + - Migrate each pod (irrespective of the deployment it belongs to) running on a less energy efficient node to (relatively) more energy efficient cluster nodes by repeated execution of individual pod migration (under the assumption that there are enough resources on the more energy efficient cluster nodes). + - After all the pods are migrated, the utilization of the energy inefficient node becomes low, which triggers the Cluster Autoscaler to delete that node.
+ - Node shutdown results in the highest energy savings as it eliminates both the active and idle power consumption of a node. + - The graph below depicts that the idle energy consumption of a node is a significant portion of the node's total power consumption. ![NodeShutdown-IdleEnergySaving](./figs/NodeShutdown_IdleEnergySaving.png) + +### Test Plan +Unit tests are available with code coverage > 90%. + +### Graduation Criteria +### Upgrade / Downgrade Strategy +This is an optional plugin which can be easily included in the scheduler configuration as required. +### Version Skew Strategy +N/A + +## Production Readiness Review Questionnaire +### Feature Enablement and Rollback +How can this feature be enabled / disabled in a live cluster? + - Feature gate (also fill in values in kep.yaml) + - Feature gate name: + - Components depending on the feature gate: + - Other + - Describe the mechanism: + - Will enabling / disabling the feature require downtime of the control plane? + - Will enabling / disabling the feature require downtime or reprovisioning of a node? + +Does enabling the feature change any default behavior? + +It is an optional feature. It changes the way pods are placed on the nodes (e.g., packing of pods on nodes vs. spreading of pods across nodes by default). + +Can the feature be disabled once it has been enabled (i.e. can we roll back the enablement)? + +Yes + +What happens if we re-enable the feature if it was previously rolled back? + +It is an optional feature. At any point, a deployment can specify to include the PEAKS plugin configuration or not as desired. In other words, this feature can be re-enabled seamlessly even if it was previously rolled back. + +Are there any tests for feature enablement/disablement? + +Unit tests are available with `code coverage` > 90%. This is an optional plugin which can be easily included in the scheduler configuration as required. + +### Rollout, Upgrade and Rollback Planning +How can a rollout or rollback fail? Can it impact already running workloads? + +All the already scheduled pods continue to run. New pods can be scheduled using this plugin or not as needed. + +What specific metrics should inform a rollback? + +The PEAKS plugin can be selected at per-deployment granularity. Simply don't use the PEAKS scheduler configuration while deploying a pod if energy-aware scheduling of pods on nodes is not a requirement. + +Were upgrade and rollback tested? Was the upgrade->downgrade->upgrade path tested? + +Yes + +Is the rollout accompanied by any deprecations and/or removals of features, APIs, fields of API types, flags, etc.? + +N/A + +### Monitoring Requirements +How can an operator determine if the feature is in use by workloads? + +By inspecting the workload deployment YAML file (`kubectl edit deployment name-of-the-deployment`), as it should specify the custom scheduler configuration to use this functionality. + +How can someone using this feature know that it is working for their instance? +- Events + - Event Reason: +- API .status + - Condition name: + - Other field: +- Other (treat as last resort) + - Details: + +What are the reasonable SLOs (Service Level Objectives) for the enhancement? + +The PEAKS scheduler plugin aims to reduce the energy consumption for workload execution. The amount of energy savings depends on the cluster configuration and workload characteristics. + +What are the SLIs (Service Level Indicators) an operator can use to determine the health of the service?
+- Metrics + - Metric name: + - [Optional] Aggregation method: + - Components exposing the metric: +- Other (treat as last resort) + - Details: + +Are there any missing metrics that would be useful to have to improve observability of this feature? + +### Dependencies +Does this feature depend on any specific services running in the cluster? + +Yes, it depends on the Prometheus service running in the cluster for metrics (utilization and energy consumption) collection. + +### Scalability +Will enabling / using this feature result in any new API calls? + +No. + +Will enabling / using this feature result in introducing new API types? + +No. + +Will enabling / using this feature result in any new calls to the cloud provider? + +No. + +Will enabling / using this feature result in increasing size or count of the existing API objects? + +No. + +Will enabling / using this feature result in increasing time taken by any operations covered by [existing SLIs/SLOs](https://git.k8s.io/community/sig-scalability/slos/slos.md#kubernetes-slisslos)? + +No. + +Will enabling / using this feature result in non-negligible increase of resource usage (CPU, RAM, disk, IO, ...) in any components? + +No - the metrics are cached at the load-watcher and our plugins only pull them when needed, so any increase in resource usage should be negligible. Moreover, the algorithms provided run in linear time in the number of nodes. + +Can enabling / using this feature result in resource exhaustion of some node resources (PIDs, sockets, inodes, etc.)? + +No. + +### Troubleshooting +How does this feature react if the API server and/or etcd is unavailable? + +Running pods are not affected. Any new submissions would be rejected by the scheduler. + +What are other known failure modes? + +N/A + +What steps should be taken if SLOs are not being met to determine the problem?
+ +N/A + +## Implementation History +## Drawbacks +## Alternatives +## Infrastructure Needed (Optional) diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/Kubectl-scale_Default.png b/kep/tbd-peaks-power-aware-scheduling/figs/Kubectl-scale_Default.png new file mode 100644 index 000000000..39b531e51 Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/Kubectl-scale_Default.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/Kubectl-scale_PEAKS.png b/kep/tbd-peaks-power-aware-scheduling/figs/Kubectl-scale_PEAKS.png new file mode 100644 index 000000000..037dd08dd Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/Kubectl-scale_PEAKS.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/Migration_FinalPlacement.png b/kep/tbd-peaks-power-aware-scheduling/figs/Migration_FinalPlacement.png new file mode 100644 index 000000000..9d46cfe44 Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/Migration_FinalPlacement.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/Migration_InitialPlacement.png b/kep/tbd-peaks-power-aware-scheduling/figs/Migration_InitialPlacement.png new file mode 100644 index 000000000..8799f1b03 Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/Migration_InitialPlacement.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/Migration_IntermediatePlacement.png b/kep/tbd-peaks-power-aware-scheduling/figs/Migration_IntermediatePlacement.png new file mode 100644 index 000000000..952b1a724 Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/Migration_IntermediatePlacement.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/NodeShutdown_IdleEnergySaving.png b/kep/tbd-peaks-power-aware-scheduling/figs/NodeShutdown_IdleEnergySaving.png new file mode 100644 index 000000000..4e7f708e8 Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/NodeShutdown_IdleEnergySaving.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_with_HPA.png b/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_with_HPA.png new file mode 100644 index 000000000..75a0c7a6a Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_with_HPA.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_with_Kube-scheduler.png b/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_with_Kube-scheduler.png new file mode 100644 index 000000000..92d8081ba Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_with_Kube-scheduler.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_workflow.png b/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_workflow.png new file mode 100644 index 000000000..9d414eb12 Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/PEAKS_workflow.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/figs/PowerModelsOfNodes.png b/kep/tbd-peaks-power-aware-scheduling/figs/PowerModelsOfNodes.png new file mode 100644 index 000000000..f1d9d644b Binary files /dev/null and b/kep/tbd-peaks-power-aware-scheduling/figs/PowerModelsOfNodes.png differ diff --git a/kep/tbd-peaks-power-aware-scheduling/kep.yaml b/kep/tbd-peaks-power-aware-scheduling/kep.yaml new file mode 100644 index 000000000..265e66eba --- /dev/null +++ b/kep/tbd-peaks-power-aware-scheduling/kep.yaml @@ -0,0 +1,15 @@ +title: Power Efficiency Aware Kubernetes Scheduling (PEAKS) +status: provisional +authors: + - "@knarayan" + - "@husky-parul" + - "@fali007" 
+owning-sig: sig-scheduling +reviewers: + - "@Huang-Wei" + - "@ahg-g" + - "@alculquicondor" +approvers: + - "@Huang-Wei" +editor: TBD +creation-date: 2024-07-04 diff --git a/manifests/appgroup/deploy-appgroup-controller.yaml b/manifests/appgroup/deploy-appgroup-controller.yaml index 1b8a27289..c80e0c7be 100644 --- a/manifests/appgroup/deploy-appgroup-controller.yaml +++ b/manifests/appgroup/deploy-appgroup-controller.yaml @@ -60,6 +60,4 @@ spec: containers: - name: appgroup-controller image: localhost:5000/appgroup-controller/controller:latest - command: - - /bin/controller - imagePullPolicy: IfNotPresent \ No newline at end of file + imagePullPolicy: IfNotPresent diff --git a/manifests/appgroup/deploy-sig-scheduling-controller-and-scheduler.yaml b/manifests/appgroup/deploy-sig-scheduling-controller-and-scheduler.yaml index 5e3ae84b4..c79b2160c 100644 --- a/manifests/appgroup/deploy-sig-scheduling-controller-and-scheduler.yaml +++ b/manifests/appgroup/deploy-sig-scheduling-controller-and-scheduler.yaml @@ -89,7 +89,7 @@ spec: serviceAccountName: scheduler-plugins-controller containers: - name: scheduler-plugins-controller - image: registry.k8s.io/scheduler-plugins/controller:v0.28.9 + image: registry.k8s.io/scheduler-plugins/controller:v0.29.7 imagePullPolicy: IfNotPresent --- # Install the scheduler @@ -113,9 +113,8 @@ spec: nodeSelector: # To deploy in master node node-role.kubernetes.io/master: "" containers: - - image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 - command: # For extra info, please add verbose level: e.g., - -v=9 - - /bin/kube-scheduler + - image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 + args: # For extra info, please add verbose level: e.g., - -v=9 - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf - --config=/etc/kubernetes/scheduler-config.yaml diff --git a/manifests/coscheduling/crd.yaml b/manifests/coscheduling/crd.yaml index de2c94b5b..d1e43e16b 100644 --- a/manifests/coscheduling/crd.yaml +++ b/manifests/coscheduling/crd.yaml @@ -74,7 +74,9 @@ spec: MinMember defines the minimal number of members/tasks to run the pod group; if there's not enough resources to start all tasks, the scheduler will not start any. + The minimum is 1 format: int32 + minimum: 1 type: integer minResources: additionalProperties: diff --git a/manifests/install/all-in-one.yaml b/manifests/install/all-in-one.yaml index 50b0c7b14..2ef1e5585 100644 --- a/manifests/install/all-in-one.yaml +++ b/manifests/install/all-in-one.yaml @@ -96,5 +96,5 @@ spec: serviceAccountName: scheduler-plugins-controller containers: - name: scheduler-plugins-controller - image: registry.k8s.io/scheduler-plugins/controller:v0.28.9 + image: registry.k8s.io/scheduler-plugins/controller:v0.29.7 imagePullPolicy: IfNotPresent diff --git a/manifests/install/charts/as-a-second-scheduler/Chart.yaml b/manifests/install/charts/as-a-second-scheduler/Chart.yaml index db56080fa..4088586a8 100644 --- a/manifests/install/charts/as-a-second-scheduler/Chart.yaml +++ b/manifests/install/charts/as-a-second-scheduler/Chart.yaml @@ -15,9 +15,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.28.9 +version: 0.29.7 # This is the version number of the application being deployed. 
This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. -appVersion: 0.28.9 +appVersion: 0.29.7 diff --git a/manifests/install/charts/as-a-second-scheduler/README.md b/manifests/install/charts/as-a-second-scheduler/README.md index 947a36246..78c230688 100644 --- a/manifests/install/charts/as-a-second-scheduler/README.md +++ b/manifests/install/charts/as-a-second-scheduler/README.md @@ -23,10 +23,12 @@ Quick start instructions for the setup and configuration of as-a-second-schedule #### Install chart using Helm v3.0+ +> 🆕 Starting v0.28, Helm charts are hosted on https://scheduler-plugins.sigs.k8s.io + ```bash $ git clone git@github.com:kubernetes-sigs/scheduler-plugins.git $ cd scheduler-plugins/manifests/install/charts -$ helm install scheduler-plugins as-a-second-scheduler/ --create-namespace --namespace scheduler-plugins +$ helm install --repo https://scheduler-plugins.sigs.k8s.io scheduler-plugins scheduler-plugins ``` #### Verify that scheduler and plugin-controller pod are running properly. @@ -45,14 +47,14 @@ The following table lists the configurable parameters of the as-a-second-schedul | Parameter | Description | Default | |---------------------------|-----------------------------|-------------------------------------------------------------------------------------------------| | `scheduler.name` | Scheduler name | `scheduler-plugins-scheduler` | -| `scheduler.image` | Scheduler image | `registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9` | +| `scheduler.image` | Scheduler image | `registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7` | | `scheduler.leaderElect` | Scheduler leaderElection | `false` | | `scheduler.replicaCount` | Scheduler replicaCount | `1` | | `scheduler.nodeSelector` | Scheduler nodeSelector | `{}` | | `scheduler.affinity` | Scheduler affinity | `{}` | | `scheduler.tolerations` | Scheduler tolerations | `[]` | | `controller.name` | Controller name | `scheduler-plugins-controller` | -| `controller.image` | Controller image | `registry.k8s.io/scheduler-plugins/controller:v0.28.9` | +| `controller.image` | Controller image | `registry.k8s.io/scheduler-plugins/controller:v0.29.7` | | `controller.replicaCount` | Controller replicaCount | `1` | | `controller.nodeSelector` | Controller nodeSelector | `{}` | | `controller.affinity` | Controller affinity | `{}` | diff --git a/manifests/install/charts/as-a-second-scheduler/templates/deployment.yaml b/manifests/install/charts/as-a-second-scheduler/templates/deployment.yaml index 9d7286be3..9d0b37305 100644 --- a/manifests/install/charts/as-a-second-scheduler/templates/deployment.yaml +++ b/manifests/install/charts/as-a-second-scheduler/templates/deployment.yaml @@ -50,8 +50,7 @@ spec: spec: serviceAccountName: {{ .Values.scheduler.name }} containers: - - command: - - /bin/kube-scheduler + - args: - --config=/etc/kubernetes/scheduler-config.yaml image: {{ .Values.scheduler.image }} imagePullPolicy: IfNotPresent @@ -91,4 +90,4 @@ spec: {{- with .Values.scheduler.tolerations }} tolerations: {{- toYaml . 
| nindent 8}} {{- end }} - \ No newline at end of file + diff --git a/manifests/install/charts/as-a-second-scheduler/templates/rbac.yaml b/manifests/install/charts/as-a-second-scheduler/templates/rbac.yaml index 8d675c252..172144bad 100644 --- a/manifests/install/charts/as-a-second-scheduler/templates/rbac.yaml +++ b/manifests/install/charts/as-a-second-scheduler/templates/rbac.yaml @@ -66,7 +66,7 @@ rules: - apiGroups: ["scheduling.x-k8s.io"] resources: ["podgroups", "elasticquotas", "podgroups/status", "elasticquotas/status"] verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] -# for network-aware plugins add the following lines (scheduler-plugins v0.28.9) +# for network-aware plugins add the following lines (scheduler-plugins v0.29.7) #- apiGroups: [ "appgroup.diktyo.x-k8s.io" ] # resources: [ "appgroups" ] # verbs: [ "get", "list", "watch", "create", "delete", "update", "patch" ] diff --git a/manifests/install/charts/as-a-second-scheduler/values.yaml b/manifests/install/charts/as-a-second-scheduler/values.yaml index a64968ee0..5d59314f3 100644 --- a/manifests/install/charts/as-a-second-scheduler/values.yaml +++ b/manifests/install/charts/as-a-second-scheduler/values.yaml @@ -4,7 +4,7 @@ scheduler: name: scheduler-plugins-scheduler - image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 + image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 replicaCount: 1 leaderElect: false nodeSelector: {} @@ -13,7 +13,7 @@ scheduler: controller: name: scheduler-plugins-controller - image: registry.k8s.io/scheduler-plugins/controller:v0.28.9 + image: registry.k8s.io/scheduler-plugins/controller:v0.29.7 replicaCount: 1 nodeSelector: {} affinity: {} diff --git a/manifests/networktopology/deploy-networktopology-controller.yaml b/manifests/networktopology/deploy-networktopology-controller.yaml index e438bbb4c..7cb7872d1 100644 --- a/manifests/networktopology/deploy-networktopology-controller.yaml +++ b/manifests/networktopology/deploy-networktopology-controller.yaml @@ -69,6 +69,4 @@ spec: containers: - name: networktopology-controller image: localhost:5000/networktopology-controller/controller:latest - command: - - /bin/controller - imagePullPolicy: IfNotPresent \ No newline at end of file + imagePullPolicy: IfNotPresent diff --git a/manifests/networktopology/deploy-sig-scheduling-controller-and-scheduler.yaml b/manifests/networktopology/deploy-sig-scheduling-controller-and-scheduler.yaml index 5e3ae84b4..c79b2160c 100644 --- a/manifests/networktopology/deploy-sig-scheduling-controller-and-scheduler.yaml +++ b/manifests/networktopology/deploy-sig-scheduling-controller-and-scheduler.yaml @@ -89,7 +89,7 @@ spec: serviceAccountName: scheduler-plugins-controller containers: - name: scheduler-plugins-controller - image: registry.k8s.io/scheduler-plugins/controller:v0.28.9 + image: registry.k8s.io/scheduler-plugins/controller:v0.29.7 imagePullPolicy: IfNotPresent --- # Install the scheduler @@ -113,9 +113,8 @@ spec: nodeSelector: # To deploy in master node node-role.kubernetes.io/master: "" containers: - - image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 - command: # For extra info, please add verbose level: e.g., - -v=9 - - /bin/kube-scheduler + - image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 + args: # For extra info, please add verbose level: e.g., - -v=9 - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf - --config=/etc/kubernetes/scheduler-config.yaml diff --git 
a/manifests/noderesourcetopology/deploy.yaml b/manifests/noderesourcetopology/deploy.yaml index ff5855eae..151510197 100644 --- a/manifests/noderesourcetopology/deploy.yaml +++ b/manifests/noderesourcetopology/deploy.yaml @@ -25,8 +25,7 @@ spec: containers: - image: localhost:5000/scheduler-plugins/kube-scheduler:latest imagePullPolicy: Never - command: - - /bin/kube-scheduler + args: - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf - --config=/etc/kubernetes/scheduler-config/scheduler-config.yaml diff --git a/pkg/capacityscheduling/capacity_scheduling.go b/pkg/capacityscheduling/capacity_scheduling.go index 3878d351d..b9caa8509 100644 --- a/pkg/capacityscheduling/capacity_scheduling.go +++ b/pkg/capacityscheduling/capacity_scheduling.go @@ -193,7 +193,8 @@ func New(ctx context.Context, obj runtime.Object, handle framework.Handle) (fram func (c *CapacityScheduling) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: - // https://git.k8s.io/kubernetes/pkg/scheduler/eventhandlers.go#L403-L410 + // https://github.com/kubernetes/kubernetes/pull/101394 + // Please follow: eventhandlers.go#L403-L410 eqGVK := fmt.Sprintf("elasticquotas.v1alpha1.%v", scheduling.GroupName) return []framework.ClusterEventWithHint{ {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}}, diff --git a/pkg/coscheduling/coscheduling.go b/pkg/coscheduling/coscheduling.go index 7b93a4d75..55f5f368c 100644 --- a/pkg/coscheduling/coscheduling.go +++ b/pkg/coscheduling/coscheduling.go @@ -104,7 +104,8 @@ func New(_ context.Context, obj runtime.Object, handle framework.Handle) (framew func (cs *Coscheduling) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: - // https://git.k8s.io/kubernetes/pkg/scheduler/eventhandlers.go#L403-L410 + // https://github.com/kubernetes/kubernetes/pull/101394 + // Please follow: eventhandlers.go#L403-L410 pgGVK := fmt.Sprintf("podgroups.v1alpha1.%v", scheduling.GroupName) return []framework.ClusterEventWithHint{ {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Add}}, diff --git a/pkg/networkaware/networkoverhead/networkoverhead_test.go b/pkg/networkaware/networkoverhead/networkoverhead_test.go index 9d5e99c57..204207f25 100644 --- a/pkg/networkaware/networkoverhead/networkoverhead_test.go +++ b/pkg/networkaware/networkoverhead/networkoverhead_test.go @@ -286,8 +286,8 @@ func GetAppGroupCROnlineBoutique() *agv1alpha1.AppGroup { }, }, Status: agv1alpha1.AppGroupStatus{ - ScheduleStartTime: metav1.Time{time.Now()}, - TopologyCalculationTime: metav1.Time{time.Now()}, + ScheduleStartTime: metav1.Now(), + TopologyCalculationTime: metav1.Now(), TopologyOrder: agv1alpha1.AppGroupTopologyList{ agv1alpha1.AppGroupTopologyInfo{Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p1-deployment", Selector: "p1", APIVersion: "apps/v1", Namespace: "default"}, Index: 1}, agv1alpha1.AppGroupTopologyInfo{Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p10-deployment", Selector: "p10", APIVersion: "apps/v1", Namespace: "default"}, Index: 2}, @@ -330,8 +330,9 @@ func GetAppGroupCRBasic() *agv1alpha1.AppGroup { }, }, Status: agv1alpha1.AppGroupStatus{ - RunningWorkloads: 3, - ScheduleStartTime: metav1.Time{time.Now()}, TopologyCalculationTime: metav1.Time{time.Now()}, + RunningWorkloads: 3, + ScheduleStartTime: 
metav1.Now(), + TopologyCalculationTime: metav1.Now(), TopologyOrder: agv1alpha1.AppGroupTopologyList{ agv1alpha1.AppGroupTopologyInfo{ Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p1-deployment", Selector: "p1", APIVersion: "apps/v1", Namespace: "default"}, Index: 1}, diff --git a/pkg/networkaware/topologicalsort/topologicalsort_test.go b/pkg/networkaware/topologicalsort/topologicalsort_test.go index c12d9320a..c8c206b1c 100644 --- a/pkg/networkaware/topologicalsort/topologicalsort_test.go +++ b/pkg/networkaware/topologicalsort/topologicalsort_test.go @@ -21,7 +21,6 @@ import ( "math" "sort" "testing" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -105,8 +104,8 @@ func GetAppGroupCROnlineBoutique() *agv1alpha1.AppGroup { }, }, Status: agv1alpha1.AppGroupStatus{ - ScheduleStartTime: metav1.Time{time.Now()}, - TopologyCalculationTime: metav1.Time{time.Now()}, + ScheduleStartTime: metav1.Now(), + TopologyCalculationTime: metav1.Now(), TopologyOrder: agv1alpha1.AppGroupTopologyList{ agv1alpha1.AppGroupTopologyInfo{Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p1-deployment", Selector: "p1", APIVersion: "apps/v1", Namespace: "default"}, Index: 1}, agv1alpha1.AppGroupTopologyInfo{Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p10-deployment", Selector: "p10", APIVersion: "apps/v1", Namespace: "default"}, Index: 2}, @@ -145,8 +144,9 @@ func GetAppGroupCRBasic() *agv1alpha1.AppGroup { }, }, Status: agv1alpha1.AppGroupStatus{ - RunningWorkloads: 3, - ScheduleStartTime: metav1.Time{time.Now()}, TopologyCalculationTime: metav1.Time{time.Now()}, + RunningWorkloads: 3, + ScheduleStartTime: metav1.Now(), + TopologyCalculationTime: metav1.Now(), TopologyOrder: agv1alpha1.AppGroupTopologyList{ agv1alpha1.AppGroupTopologyInfo{ Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p1-deployment", Selector: "p1", APIVersion: "apps/v1", Namespace: "default"}, Index: 1}, diff --git a/pkg/noderesourcetopology/cache/cache.go b/pkg/noderesourcetopology/cache/cache.go index dcb176ff9..ad05338c8 100644 --- a/pkg/noderesourcetopology/cache/cache.go +++ b/pkg/noderesourcetopology/cache/cache.go @@ -24,6 +24,20 @@ import ( topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2" ) +type CachedNRTInfo struct { + // Generation is akin to the object resourceVersion and represents + // the observed state in the cache. It's an opaque monotonically increasing number which can only compared for equality + // and which is only increased in the resync loop. It is used to cross correlate resync attempts with observed state + // with cache content. Used only in logging. If the cache implementation has no concept of caching nor generation, + // it should always return 0 (zero). + Generation uint64 + + // Fresh signals the caller if the NRT data is fresh. + // If true, the data is fresh and ready to be consumed. + // If false, the data is stale and the caller need to wait for a future refresh. + Fresh bool +} + type Interface interface { // GetCachedNRTCopy retrieves a NRT copy from cache, and then deducts over-reserved resources if necessary. // It will be used as the source of truth across the Pod's scheduling cycle. @@ -31,10 +45,8 @@ type Interface interface { // of NRT pertaining to the same node, pessimistically overallocated on ALL the NUMA zones of the node. // The pod argument is used only for logging purposes. 
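For illustration only, a minimal sketch of how a plugin-side caller might consume the (NRT, CachedNRTInfo) pair described in this interface comment; the `example` package, the `checkNode` name and the Unschedulable status are assumptions that simply mirror the filter.go change further down in this patch, not part of the change itself.

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"

	nrtcache "sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/cache"
)

// checkNode is a hypothetical Filter-style helper showing how a caller reads
// the (NRT, CachedNRTInfo) pair that GetCachedNRTCopy now returns instead of
// the old (NRT, bool) pair.
func checkNode(ctx context.Context, c nrtcache.Interface, nodeName string, pod *corev1.Pod) *framework.Status {
	nrt, info := c.GetCachedNRTCopy(ctx, nodeName, pod)
	if nrt == nil {
		// No NRT object is known for this node; nothing to align against.
		return nil
	}
	if !info.Fresh {
		// Stale cache entry: the node is waiting for a resync, so do not trust the data yet.
		return framework.NewStatus(framework.Unschedulable, "node topology data is stale")
	}
	// info.Generation is opaque; it is only meant for cross-correlating log lines
	// with the resync loop that produced this cache state.
	_ = info.Generation
	return nil
}
```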
// Returns nil if there is no NRT data available for the node named `nodeName`. - // Returns a boolean to signal the caller if the NRT data is fresh. - // If true, the data is fresh and ready to be consumed. - // If false, the data is stale and the caller need to wait for a future refresh. - GetCachedNRTCopy(ctx context.Context, nodeName string, pod *corev1.Pod) (*topologyv1alpha2.NodeResourceTopology, bool) + // Returns a CachedNRTInfo describing the NRT data returned. Meaningful only if `nrt` != nil. + GetCachedNRTCopy(ctx context.Context, nodeName string, pod *corev1.Pod) (*topologyv1alpha2.NodeResourceTopology, CachedNRTInfo) // NodeMaybeOverReserved declares a node was filtered out for not enough resources available. // This means this node is eligible for a resync. When a node is marked discarded (dirty), it matters not diff --git a/pkg/noderesourcetopology/cache/cache_test.go b/pkg/noderesourcetopology/cache/cache_test.go index 90c8a4086..9df645a78 100644 --- a/pkg/noderesourcetopology/cache/cache_test.go +++ b/pkg/noderesourcetopology/cache/cache_test.go @@ -100,10 +100,10 @@ func checkGetCachedNRTCopy(t *testing.T, makeCache func(client ctrlclient.WithWa nrtCache.NodeHasForeignPods(tc.nodeName, pod) } - gotNRT, gotOK := nrtCache.GetCachedNRTCopy(ctx, tc.nodeName, pod) + gotNRT, gotInfo := nrtCache.GetCachedNRTCopy(ctx, tc.nodeName, pod) - if gotOK != tc.expectedOK { - t.Fatalf("unexpected object status from cache: got: %v expected: %v", gotOK, tc.expectedOK) + if gotInfo.Fresh != tc.expectedOK { + t.Fatalf("unexpected object status from cache: got: %v expected: %v", gotInfo.Fresh, tc.expectedOK) } if gotNRT != nil && tc.expectedNRT == nil { t.Fatalf("object from cache not nil but expected nil") diff --git a/pkg/noderesourcetopology/cache/discardreserved.go b/pkg/noderesourcetopology/cache/discardreserved.go index ff877f9ee..b59d8cb2f 100644 --- a/pkg/noderesourcetopology/cache/discardreserved.go +++ b/pkg/noderesourcetopology/cache/discardreserved.go @@ -58,20 +58,21 @@ func NewDiscardReserved(lh logr.Logger, client ctrlclient.Client) Interface { } } -func (pt *DiscardReserved) GetCachedNRTCopy(ctx context.Context, nodeName string, _ *corev1.Pod) (*topologyv1alpha2.NodeResourceTopology, bool) { +func (pt *DiscardReserved) GetCachedNRTCopy(ctx context.Context, nodeName string, _ *corev1.Pod) (*topologyv1alpha2.NodeResourceTopology, CachedNRTInfo) { pt.rMutex.RLock() defer pt.rMutex.RUnlock() if t, ok := pt.reservationMap[nodeName]; ok { if len(t) > 0 { - return nil, false + return nil, CachedNRTInfo{} } } + info := CachedNRTInfo{Fresh: true} nrt := &topologyv1alpha2.NodeResourceTopology{} if err := pt.client.Get(ctx, types.NamespacedName{Name: nodeName}, nrt); err != nil { - return nil, true + return nil, info } - return nrt, true + return nrt, info } func (pt *DiscardReserved) NodeMaybeOverReserved(nodeName string, pod *corev1.Pod) {} diff --git a/pkg/noderesourcetopology/cache/discardreserved_test.go b/pkg/noderesourcetopology/cache/discardreserved_test.go index 63c763dca..46e86772f 100644 --- a/pkg/noderesourcetopology/cache/discardreserved_test.go +++ b/pkg/noderesourcetopology/cache/discardreserved_test.go @@ -66,8 +66,8 @@ func TestDiscardReservedNodesGetNRTCopyFails(t *testing.T) { }, } - nrtObj, ok := nrtCache.GetCachedNRTCopy(context.Background(), "node1", &corev1.Pod{}) - if ok { + nrtObj, nrtInfo := nrtCache.GetCachedNRTCopy(context.Background(), "node1", &corev1.Pod{}) + if nrtInfo.Fresh { t.Fatal("expected false\ngot true\n") } if nrtObj != nil { diff --git 
a/pkg/noderesourcetopology/cache/overreserve.go b/pkg/noderesourcetopology/cache/overreserve.go index 505ba955d..9cd5008c1 100644 --- a/pkg/noderesourcetopology/cache/overreserve.go +++ b/pkg/noderesourcetopology/cache/overreserve.go @@ -45,6 +45,7 @@ type OverReserve struct { lh logr.Logger client ctrlclient.Reader lock sync.Mutex + generation uint64 nrts *nrtStore assumedResources map[string]*resourceStore // nodeName -> resourceStore // nodesMaybeOverreserved counts how many times a node is filtered out. This is used as trigger condition to try @@ -97,30 +98,33 @@ func NewOverReserve(ctx context.Context, lh logr.Logger, cfg *apiconfig.NodeReso return obj, nil } -func (ov *OverReserve) GetCachedNRTCopy(ctx context.Context, nodeName string, pod *corev1.Pod) (*topologyv1alpha2.NodeResourceTopology, bool) { +func (ov *OverReserve) GetCachedNRTCopy(ctx context.Context, nodeName string, pod *corev1.Pod) (*topologyv1alpha2.NodeResourceTopology, CachedNRTInfo) { ov.lock.Lock() defer ov.lock.Unlock() if ov.nodesWithForeignPods.IsSet(nodeName) { - return nil, false + return nil, CachedNRTInfo{} } + info := CachedNRTInfo{Fresh: true} nrt := ov.nrts.GetNRTCopyByNodeName(nodeName) if nrt == nil { - return nil, true + return nil, info } + + info.Generation = ov.generation nodeAssumedResources, ok := ov.assumedResources[nodeName] if !ok { - return nrt, true + return nrt, info } logID := klog.KObj(pod) - lh := ov.lh.WithValues(logging.KeyPod, logID, logging.KeyPodUID, logging.PodUID(pod), logging.KeyNode, nodeName) + lh := ov.lh.WithValues(logging.KeyPod, logID, logging.KeyPodUID, logging.PodUID(pod), logging.KeyNode, nodeName, logging.KeyGeneration, ov.generation) lh.V(6).Info("NRT", "fromcache", stringify.NodeResourceTopologyResources(nrt)) nodeAssumedResources.UpdateNRT(nrt, logging.KeyPod, logID) lh.V(5).Info("NRT", "withassumed", stringify.NodeResourceTopologyResources(nrt)) - return nrt, true + return nrt, info } func (ov *OverReserve) NodeMaybeOverReserved(nodeName string, pod *corev1.Pod) { @@ -176,6 +180,7 @@ func (ov *OverReserve) UnreserveNodeResources(nodeName string, pod *corev1.Pod) } type DesyncedNodes struct { + Generation uint64 MaybeOverReserved []string ConfigChanged []string } @@ -207,6 +212,10 @@ func (rn DesyncedNodes) DirtyCount() int { func (ov *OverReserve) GetDesyncedNodes(lh logr.Logger) DesyncedNodes { ov.lock.Lock() defer ov.lock.Unlock() + + // make sure to log the generation to be able to crosscorrelate with later logs + lh = lh.WithValues(logging.KeyGeneration, ov.generation) + // this is intentionally aggressive. We don't yet make any attempt to find out if the // node was discarded because pessimistically overrserved (which should indeed trigger // a resync) or if it was discarded because the actual resources on the node really were @@ -229,6 +238,7 @@ func (ov *OverReserve) GetDesyncedNodes(lh logr.Logger) DesyncedNodes { lh.V(4).Info("found dirty nodes", "foreign", foreignCount, "discarded", overreservedCount, "configChange", configChangeCount, "total", nodes.Len()) } return DesyncedNodes{ + Generation: ov.generation, MaybeOverReserved: nodes.Keys(), ConfigChanged: configChangeNodes.Keys(), } @@ -244,11 +254,14 @@ func (ov *OverReserve) GetDesyncedNodes(lh logr.Logger) DesyncedNodes { // too aggressive resync attempts, so to more, likely unnecessary, computation work on the scheduler side. 
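Before the actual Resync implementation that follows, a hedged sketch of where the generation value is observed and where it is bumped; `resyncSketch` and `recompute` are hypothetical names introduced only for this illustration, while GetDesyncedNodes, FlushNodes and the DesyncedNodes fields are the ones added in this patch.

```go
package example

import (
	"github.com/go-logr/logr"
	topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"

	nrtcache "sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/cache"
)

// resyncSketch is a trimmed-down, assumed version of the resync flow.
// recompute stands in for whatever rebuilds NRT objects for the dirty nodes.
func resyncSketch(ov *nrtcache.OverReserve, lh logr.Logger, recompute func([]string) []*topologyv1alpha2.NodeResourceTopology) {
	nodes := ov.GetDesyncedNodes(lh)
	if nodes.Len() == 0 {
		return // nothing dirty, nothing to do
	}
	// Carry the generation on every log line so later entries can be matched
	// with the cache state this pass started from.
	lh = lh.WithValues("generation", nodes.Generation)

	updated := recompute(nodes.MaybeOverReserved)
	// FlushNodes is the mutation point: the cache bumps its internal
	// generation counter only when at least one NRT object is written back.
	ov.FlushNodes(lh, updated...)
}
```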
func (ov *OverReserve) Resync() { // we are not working with a specific pod, so we need a unique key to track this flow - lh_ := ov.lh.WithName(logging.FlowCacheSync).WithValues(logging.KeyLogID, logging.TimeLogID()) + lh_ := ov.lh.WithName(logging.FlowCacheSync) lh_.V(4).Info(logging.FlowBegin) defer lh_.V(4).Info(logging.FlowEnd) nodes := ov.GetDesyncedNodes(lh_) + // we start without because chicken/egg problem. This is the earliest we can use the generation value. + lh_ = lh_.WithValues(logging.KeyGeneration, nodes.Generation) + // avoid as much as we can unnecessary work and logs. if nodes.Len() == 0 { lh_.V(5).Info("no dirty nodes detected") @@ -331,6 +344,7 @@ func (ov *OverReserve) Resync() { func (ov *OverReserve) FlushNodes(lh logr.Logger, nrts ...*topologyv1alpha2.NodeResourceTopology) { ov.lock.Lock() defer ov.lock.Unlock() + for _, nrt := range nrts { lh.V(2).Info("flushing", logging.KeyNode, nrt.Name) ov.nrts.Update(nrt) @@ -339,6 +353,14 @@ func (ov *OverReserve) FlushNodes(lh logr.Logger, nrts ...*topologyv1alpha2.Node ov.nodesWithForeignPods.Delete(nrt.Name) ov.nodesWithAttrUpdate.Delete(nrt.Name) } + + if len(nrts) == 0 { + return + } + + // increase only if we mutated the internal state + ov.generation += 1 + lh.V(2).Info("generation", "new", ov.generation) } // to be used only in tests diff --git a/pkg/noderesourcetopology/cache/overreserve_test.go b/pkg/noderesourcetopology/cache/overreserve_test.go index 0d02ad95d..33214b444 100644 --- a/pkg/noderesourcetopology/cache/overreserve_test.go +++ b/pkg/noderesourcetopology/cache/overreserve_test.go @@ -720,8 +720,8 @@ func TestNodeWithForeignPods(t *testing.T) { t.Errorf("unexpected dirty nodes: %v", nodes.MaybeOverReserved) } - _, ok := nrtCache.GetCachedNRTCopy(context.Background(), target, &corev1.Pod{}) - if ok { + _, info := nrtCache.GetCachedNRTCopy(context.Background(), target, &corev1.Pod{}) + if info.Fresh { t.Errorf("succesfully got node with foreign pods!") } } diff --git a/pkg/noderesourcetopology/cache/passthrough.go b/pkg/noderesourcetopology/cache/passthrough.go index ac2107613..29f1e2a46 100644 --- a/pkg/noderesourcetopology/cache/passthrough.go +++ b/pkg/noderesourcetopology/cache/passthrough.go @@ -40,14 +40,15 @@ func NewPassthrough(lh logr.Logger, client ctrlclient.Client) Interface { } } -func (pt Passthrough) GetCachedNRTCopy(ctx context.Context, nodeName string, _ *corev1.Pod) (*topologyv1alpha2.NodeResourceTopology, bool) { +func (pt Passthrough) GetCachedNRTCopy(ctx context.Context, nodeName string, _ *corev1.Pod) (*topologyv1alpha2.NodeResourceTopology, CachedNRTInfo) { pt.lh.V(5).Info("lister for NRT plugin") + info := CachedNRTInfo{Fresh: true} nrt := &topologyv1alpha2.NodeResourceTopology{} if err := pt.client.Get(ctx, types.NamespacedName{Name: nodeName}, nrt); err != nil { pt.lh.V(5).Error(err, "cannot get nrts from lister") - return nil, true + return nil, info } - return nrt, true + return nrt, info } func (pt Passthrough) NodeMaybeOverReserved(nodeName string, pod *corev1.Pod) {} diff --git a/pkg/noderesourcetopology/filter.go b/pkg/noderesourcetopology/filter.go index 58ba44899..f627d0639 100644 --- a/pkg/noderesourcetopology/filter.go +++ b/pkg/noderesourcetopology/filter.go @@ -196,8 +196,9 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle lh.V(4).Info(logging.FlowBegin) defer lh.V(4).Info(logging.FlowEnd) - nodeTopology, ok := tm.nrtCache.GetCachedNRTCopy(ctx, nodeName, pod) - if !ok { + nodeTopology, info := tm.nrtCache.GetCachedNRTCopy(ctx, nodeName, 
pod) + lh = lh.WithValues(logging.KeyGeneration, info.Generation) + if !info.Fresh { lh.V(2).Info("invalid topology data") return framework.NewStatus(framework.Unschedulable, "invalid node topology data") } diff --git a/pkg/noderesourcetopology/logging/logging.go b/pkg/noderesourcetopology/logging/logging.go index 14d3ee2b1..78bfa3998 100644 --- a/pkg/noderesourcetopology/logging/logging.go +++ b/pkg/noderesourcetopology/logging/logging.go @@ -17,9 +17,7 @@ limitations under the License. package logging import ( - "fmt" "reflect" - "time" corev1 "k8s.io/api/core/v1" ) @@ -33,6 +31,7 @@ const ( KeyFlow string = "flow" KeyContainer string = "container" KeyContainerKind string = "kind" + KeyGeneration string = "generation" ) const ( @@ -63,7 +62,3 @@ func PodUID(pod *corev1.Pod) string { } return string(pod.GetUID()) } - -func TimeLogID() string { - return fmt.Sprintf("uts/%v", time.Now().UnixMilli()) -} diff --git a/pkg/noderesourcetopology/plugin.go b/pkg/noderesourcetopology/plugin.go index 9efd04864..d9f2a1f9a 100644 --- a/pkg/noderesourcetopology/plugin.go +++ b/pkg/noderesourcetopology/plugin.go @@ -122,7 +122,8 @@ func New(ctx context.Context, args runtime.Object, handle framework.Handle) (fra // that make other Pods schedulable. func (tm *TopologyMatch) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: - // https://git.k8s.io/kubernetes/pkg/scheduler/eventhandlers.go#L403-L410 + // https://github.com/kubernetes/kubernetes/pull/101394 + // Please follow: eventhandlers.go#L403-L410 nrtGVK := fmt.Sprintf("noderesourcetopologies.v1alpha2.%v", topologyapi.GroupName) return []framework.ClusterEventWithHint{ {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}}, diff --git a/pkg/noderesourcetopology/score.go b/pkg/noderesourcetopology/score.go index 88a92c414..e82c9a76f 100644 --- a/pkg/noderesourcetopology/score.go +++ b/pkg/noderesourcetopology/score.go @@ -73,9 +73,9 @@ func (tm *TopologyMatch) Score(ctx context.Context, state *framework.CycleState, return framework.MaxNodeScore, nil } - nodeTopology, ok := tm.nrtCache.GetCachedNRTCopy(ctx, nodeName, pod) - - if !ok { + nodeTopology, info := tm.nrtCache.GetCachedNRTCopy(ctx, nodeName, pod) + lh = lh.WithValues(logging.KeyGeneration, info.Generation) + if !info.Fresh { lh.V(4).Info("noderesourcetopology is not valid for node") return 0, nil } diff --git a/pkg/trimaran/lowriskovercommitment/lowriskovercommitment.go b/pkg/trimaran/lowriskovercommitment/lowriskovercommitment.go index 36f5427b0..bf8492b00 100644 --- a/pkg/trimaran/lowriskovercommitment/lowriskovercommitment.go +++ b/pkg/trimaran/lowriskovercommitment/lowriskovercommitment.go @@ -89,7 +89,7 @@ func New(_ context.Context, obj runtime.Object, handle framework.Handle) (framew } // PreScore : calculate pod requests and limits and store as plugin state data to be used during scoring -func (pl *LowRiskOverCommitment) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status { +func (pl *LowRiskOverCommitment) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status { klog.V(6).InfoS("PreScore: Calculating pod resource requests and limits", "pod", klog.KObj(pod)) podResourcesStateData := CreatePodResourcesStateData(pod) cycleState.Write(PodResourcesKey, podResourcesStateData) diff --git a/pkg/trimaran/lowriskovercommitment/lowriskovercommitment_test.go 
b/pkg/trimaran/lowriskovercommitment/lowriskovercommitment_test.go index 8e52c8f77..93c267ad5 100644 --- a/pkg/trimaran/lowriskovercommitment/lowriskovercommitment_test.go +++ b/pkg/trimaran/lowriskovercommitment/lowriskovercommitment_test.go @@ -217,7 +217,7 @@ func TestLowRiskOverCommitment_Score(t *testing.T) { p, _ := New(ctx, &lowRiskOverCommitmentArgs, fh) preScorePlugin := p.(framework.PreScorePlugin) - status := preScorePlugin.PreScore(context.Background(), state, tt.pod, tt.nodes) + status := preScorePlugin.PreScore(context.Background(), state, tt.pod, tf.BuildNodeInfos(tt.nodes)) assert.True(t, status.IsSuccess()) scorePlugin := p.(framework.ScorePlugin) diff --git a/site/content/en/_index.md b/site/content/en/_index.md index da56f365c..66de0c8ab 100644 --- a/site/content/en/_index.md +++ b/site/content/en/_index.md @@ -60,34 +60,47 @@ on demand, (e.g., `v0.18.800`) are used to indicated that the k8s client package changed since the previous release, and that only scheduler plugins code (features or bug fixes) was changed. -| Scheduler Plugins | Compiled With k8s Version | Container Image | Arch | -|-------------------|---------------------------|-----------------------------------------------------------|----------------| -| v0.28.9 | v1.28.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 | AMD64
ARM64 | -| v0.27.8 | v1.27.8 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.27.8 | AMD64<br>ARM64 |
-| v0.26.7 | v1.26.7 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.26.7 | AMD64<br>ARM64 |
-| v0.25.12 | v1.25.12 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.25.12 | AMD64<br>ARM64 |
-| v0.24.9 | v1.24.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.24.9 | AMD64<br>ARM64 |
-| v0.23.10 | v1.23.10 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.23.10 | AMD64<br>ARM64 |
-| v0.22.6 | v1.22.6 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.22.6 | AMD64<br>ARM64 |
-| v0.21.6 | v1.21.6 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.21.6 | AMD64<br>ARM64 |
-| v0.20.10 | v1.20.10 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.20.10 | AMD64<br>ARM64 |
-| v0.19.9 | v1.19.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.19.9 | AMD64<br>ARM64 |
-| v0.19.8 | v1.19.8 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.19.8 | AMD64<br>ARM64 |
-| v0.18.9 | v1.18.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.18.9 | AMD64 | - -| Controller | Compiled With k8s Version | Container Image | Arch | -|------------|---------------------------|-------------------------------------------------------|----------------| -| v0.28.9 | v1.28.9 | registry.k8s.io/scheduler-plugins/controller:v0.28.9 | AMD64<br>ARM64 |
-| v0.27.8 | v1.27.8 | registry.k8s.io/scheduler-plugins/controller:v0.27.8 | AMD64<br>ARM64 |
-| v0.26.7 | v1.26.7 | registry.k8s.io/scheduler-plugins/controller:v0.26.7 | AMD64<br>ARM64 |
-| v0.25.12 | v1.25.12 | registry.k8s.io/scheduler-plugins/controller:v0.25.12 | AMD64<br>ARM64 |
-| v0.24.9 | v1.24.9 | registry.k8s.io/scheduler-plugins/controller:v0.24.9 | AMD64<br>ARM64 |
-| v0.23.10 | v1.23.10 | registry.k8s.io/scheduler-plugins/controller:v0.23.10 | AMD64<br>ARM64 |
-| v0.22.6 | v1.22.6 | registry.k8s.io/scheduler-plugins/controller:v0.22.6 | AMD64<br>ARM64 |
-| v0.21.6 | v1.21.6 | registry.k8s.io/scheduler-plugins/controller:v0.21.6 | AMD64<br>ARM64 |
-| v0.20.10 | v1.20.10 | registry.k8s.io/scheduler-plugins/controller:v0.20.10 | AMD64<br>ARM64 |
-| v0.19.9 | v1.19.9 | registry.k8s.io/scheduler-plugins/controller:v0.19.9 | AMD64<br>ARM64 |
-| v0.19.8 | v1.19.8 | registry.k8s.io/scheduler-plugins/controller:v0.19.8 | AMD64<br>ARM64 |
+| Scheduler Plugins | Compiled With k8s Version | Container Image | Arch | +|-------------------|---------------------------|-----------------------------------------------------------|------------------------------------------------------------| +| v0.29.7 | v1.29.7 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 | linux/amd64<br>linux/arm64<br>linux/s390x<br>linux/ppc64le |
+| v0.28.9 | v1.28.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 | linux/amd64<br>linux/arm64 |
+| v0.27.8 | v1.27.8 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.27.8 | linux/amd64<br>linux/arm64 |
+ +| Controller | Compiled With k8s Version | Container Image | Arch | +|------------|---------------------------|-------------------------------------------------------|------------------------------------------------------------| +| v0.29.7 | v1.29.7 | registry.k8s.io/scheduler-plugins/controller:v0.29.7 | linux/amd64<br>linux/arm64<br>linux/s390x<br>linux/ppc64le |
+| v0.28.9 | v1.28.9 | registry.k8s.io/scheduler-plugins/controller:v0.28.9 | linux/amd64<br>linux/arm64 |
+| v0.27.8 | v1.27.8 | registry.k8s.io/scheduler-plugins/controller:v0.27.8 | linux/amd64<br>linux/arm64 |
+ +

+Older releases + +| Scheduler Plugins | Compiled With k8s Version | Container Image | Arch | +|-------------------|---------------------------|-----------------------------------------------------------|----------------------------| +| v0.26.7 | v1.26.7 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.26.7 | linux/amd64<br>linux/arm64 |
+| v0.25.12 | v1.25.12 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.25.12 | linux/amd64<br>linux/arm64 |
+| v0.24.9 | v1.24.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.24.9 | linux/amd64<br>linux/arm64 |
+| v0.23.10 | v1.23.10 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.23.10 | linux/amd64<br>linux/arm64 |
+| v0.22.6 | v1.22.6 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.22.6 | linux/amd64<br>linux/arm64 |
+| v0.21.6 | v1.21.6 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.21.6 | linux/amd64<br>linux/arm64 |
+| v0.20.10 | v1.20.10 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.20.10 | linux/amd64<br>linux/arm64 |
+| v0.19.9 | v1.19.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.19.9 | linux/amd64<br>linux/arm64 |
+| v0.19.8 | v1.19.8 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.19.8 | linux/amd64<br>linux/arm64 |
+| v0.18.9 | v1.18.9 | registry.k8s.io/scheduler-plugins/kube-scheduler:v0.18.9 | linux/amd64 | + +| Controller | Compiled With k8s Version | Container Image | Arch | +|------------|---------------------------|-------------------------------------------------------|----------------------------| +| v0.26.7 | v1.26.7 | registry.k8s.io/scheduler-plugins/controller:v0.26.7 | linux/amd64<br>linux/arm64 |
+| v0.25.12 | v1.25.12 | registry.k8s.io/scheduler-plugins/controller:v0.25.12 | linux/amd64<br>linux/arm64 |
+| v0.24.9 | v1.24.9 | registry.k8s.io/scheduler-plugins/controller:v0.24.9 | linux/amd64<br>linux/arm64 |
+| v0.23.10 | v1.23.10 | registry.k8s.io/scheduler-plugins/controller:v0.23.10 | linux/amd64<br>linux/arm64 |
+| v0.22.6 | v1.22.6 | registry.k8s.io/scheduler-plugins/controller:v0.22.6 | linux/amd64<br>linux/arm64 |
+| v0.21.6 | v1.21.6 | registry.k8s.io/scheduler-plugins/controller:v0.21.6 | linux/amd64<br>linux/arm64 |
+| v0.20.10 | v1.20.10 | registry.k8s.io/scheduler-plugins/controller:v0.20.10 | linux/amd64<br>linux/arm64 |
+| v0.19.9 | v1.19.9 | registry.k8s.io/scheduler-plugins/controller:v0.19.9 | linux/amd64<br>linux/arm64 |
+| v0.19.8 | v1.19.8 | registry.k8s.io/scheduler-plugins/controller:v0.19.8 | linux/amd64<br>linux/arm64 |
+ +

## Community, discussion, contribution, and support diff --git a/site/content/en/docs/user-guide/develop.md b/site/content/en/docs/user-guide/develop.md index e712e6e84..8638cdf05 100644 --- a/site/content/en/docs/user-guide/develop.md +++ b/site/content/en/docs/user-guide/develop.md @@ -83,8 +83,7 @@ spec: containers: - image: localhost:5000/scheduler-plugins/kube-scheduler:latest imagePullPolicy: Never - command: - - /bin/kube-scheduler + args: - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf - --config=/etc/kubernetes/configs/scheduler-config.yaml diff --git a/site/content/en/docs/user-guide/installation.md b/site/content/en/docs/user-guide/installation.md index 2e92002df..cf1bc5d89 100644 --- a/site/content/en/docs/user-guide/installation.md +++ b/site/content/en/docs/user-guide/installation.md @@ -8,7 +8,7 @@ weight: 1 - [Create a Kubernetes Cluster](#create-a-kubernetes-cluster) -- [Install release v0.28.9 and use Coscheduling](#install-release-v0289-and-use-coscheduling) +- [Install release v0.29.7 and use Coscheduling](#install-release-v0297-and-use-coscheduling) - [As a second scheduler](#as-a-second-scheduler) - [As a single scheduler (replacing the vanilla default-scheduler)](#as-a-single-scheduler-replacing-the-vanilla-default-scheduler) - [Test Coscheduling](#test-coscheduling) @@ -28,7 +28,7 @@ If you do not have a cluster yet, create one by using one of the following provi * [kubeadm](https://kubernetes.io/docs/reference/setup-tools/kubeadm/) * [minikube](https://minikube.sigs.k8s.io/) -## Install release v0.28.9 and use Coscheduling +## Install release v0.29.7 and use Coscheduling Note: we provide two ways to install the scheduler-plugin artifacts: as a second scheduler and as a single scheduler. Their pros and cons are as below: @@ -150,7 +150,7 @@ any vanilla Kubernetes scheduling capability. Instead, a lot of extra out-of-box > - --kubeconfig=/etc/kubernetes/scheduler.conf > - --leader-elect=true 19,20c20 - < image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 + < image: registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 --- > image: registry.k8s.io/kube-scheduler:v1.28.9 50,52d49 @@ -164,14 +164,14 @@ any vanilla Kubernetes scheduling capability. Instead, a lot of extra out-of-box < name: sched-cc ``` -1. Verify that kube-scheduler pod is running properly with a correct image: `registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9` +1. 
Verify that kube-scheduler pod is running properly with a correct image: `registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7` ```bash $ kubectl get pod -n kube-system | grep kube-scheduler kube-scheduler-kind-control-plane 1/1 Running 0 3m27s $ kubectl get pods -l component=kube-scheduler -n kube-system -o=jsonpath="{.items[0].spec.containers[0].image}{'\n'}" - registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9 + registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7 ``` > **⚠️Troubleshooting:** If the kube-scheudler is not up, you may need to restart kubelet service inside the kind control plane (`systemctl restart kubelet.service`) diff --git a/site/content/en/docs/user-guide/installing-the-chart.md b/site/content/en/docs/user-guide/installing-the-chart.md index 490871ad5..e74444ee0 100644 --- a/site/content/en/docs/user-guide/installing-the-chart.md +++ b/site/content/en/docs/user-guide/installing-the-chart.md @@ -26,12 +26,14 @@ Quick start instructions for the setup and configuration of as-a-second-schedule ### Installing the chart +> 🆕 Starting v0.28, Helm charts are hosted on https://scheduler-plugins.sigs.k8s.io + #### Install chart using Helm v3.0+ ```bash $ git clone git@github.com:kubernetes-sigs/scheduler-plugins.git $ cd scheduler-plugins/manifests/install/charts -$ helm install scheduler-plugins as-a-second-scheduler/ --create-namespace --namespace scheduler-plugins +$ helm install --repo https://scheduler-plugins.sigs.k8s.io scheduler-plugins scheduler-plugins ``` #### Verify that scheduler and plugin-controller pod are running properly. @@ -50,11 +52,11 @@ The following table lists the configurable parameters of the as-a-second-schedul | Parameter | Description | Default | |---------------------------|-----------------------------|-------------------------------------------------------------------------------------------------| | `scheduler.name` | Scheduler name | `scheduler-plugins-scheduler` | -| `scheduler.image` | Scheduler image | `registry.k8s.io/scheduler-plugins/kube-scheduler:v0.28.9` | +| `scheduler.image` | Scheduler image | `registry.k8s.io/scheduler-plugins/kube-scheduler:v0.29.7` | | `scheduler.leaderElect` | Scheduler leaderElection | `false` | | `scheduler.replicaCount` | Scheduler replicaCount | `1` | | `controller.name` | Controller name | `scheduler-plugins-controller` | -| `controller.image` | Controller image | `registry.k8s.io/scheduler-plugins/controller:v0.28.9` | +| `controller.image` | Controller image | `registry.k8s.io/scheduler-plugins/controller:v0.29.7` | | `controller.replicaCount` | Controller replicaCount | `1` | | `plugins.enabled` | Plugins enabled by default | `["Coscheduling","CapacityScheduling","NodeResourceTopologyMatch", "NodeResourcesAllocatable"]` | | `plugins.disabled` | Plugins disabled by default | `["PrioritySort"]` | diff --git a/site/static/index.yaml b/site/static/index.yaml index 566388e68..826f59227 100644 --- a/site/static/index.yaml +++ b/site/static/index.yaml @@ -1,6 +1,16 @@ apiVersion: v1 entries: scheduler-plugins: + - apiVersion: v2 + appVersion: 0.29.7 + created: "2024-07-29T15:54:48.355364-07:00" + description: deploy scheduler plugin as a second scheduler in cluster + digest: 728fd4f18759fb8d154f3fb531d8bbc9488df8f58052a6427f9bf068769cab28 + name: scheduler-plugins + type: application + urls: + - https://github.com/kubernetes-sigs/scheduler-plugins/releases/download/v0.29.7/scheduler-plugins-0.29.7.tgz + version: 0.29.7 - apiVersion: v2 appVersion: 0.28.8 created: 
"2024-05-15T01:12:05.511605+08:00" @@ -11,4 +21,4 @@ entries: urls: - https://github.com/kubernetes-sigs/scheduler-plugins/releases/download/v0.28.9/scheduler-plugins-0.28.9.tgz version: 0.28.8 -generated: "2024-05-15T01:12:05.511057+08:00" +generated: "2024-07-29T15:54:48.354857-07:00" diff --git a/test/integration/networkoverhead_test.go b/test/integration/networkoverhead_test.go index 2671597c2..26c3ed775 100644 --- a/test/integration/networkoverhead_test.go +++ b/test/integration/networkoverhead_test.go @@ -181,8 +181,9 @@ func TestNetworkOverheadPlugin(t *testing.T) { }, }, ).Status(agv1alpha1.AppGroupStatus{ - RunningWorkloads: 3, - ScheduleStartTime: metav1.Time{time.Now()}, TopologyCalculationTime: metav1.Time{time.Now()}, + RunningWorkloads: 3, + ScheduleStartTime: metav1.Now(), + TopologyCalculationTime: metav1.Now(), TopologyOrder: agv1alpha1.AppGroupTopologyList{ agv1alpha1.AppGroupTopologyInfo{ Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p1", Selector: "p1", APIVersion: "apps/v1", Namespace: "default"}, Index: 1}, diff --git a/test/integration/topologicalsort_test.go b/test/integration/topologicalsort_test.go index 83364ebd1..762edfc4c 100644 --- a/test/integration/topologicalsort_test.go +++ b/test/integration/topologicalsort_test.go @@ -144,8 +144,9 @@ func TestTopologicalSortPlugin(t *testing.T) { }, }, ).Status(agv1alpha1.AppGroupStatus{ - RunningWorkloads: 3, - ScheduleStartTime: metav1.Time{time.Now()}, TopologyCalculationTime: metav1.Time{time.Now()}, + RunningWorkloads: 3, + ScheduleStartTime: metav1.Now(), + TopologyCalculationTime: metav1.Now(), TopologyOrder: agv1alpha1.AppGroupTopologyList{ agv1alpha1.AppGroupTopologyInfo{ Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p1", Selector: "p1", APIVersion: "apps/v1", Namespace: "default"}, Index: 1}, @@ -221,8 +222,9 @@ func TestTopologicalSortPlugin(t *testing.T) { }, }, ).Status(agv1alpha1.AppGroupStatus{ - RunningWorkloads: 3, - ScheduleStartTime: metav1.Time{time.Now()}, TopologyCalculationTime: metav1.Time{time.Now()}, + RunningWorkloads: 3, + ScheduleStartTime: metav1.Now(), + TopologyCalculationTime: metav1.Now(), TopologyOrder: agv1alpha1.AppGroupTopologyList{ agv1alpha1.AppGroupTopologyInfo{Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p1", Selector: "p1", APIVersion: "apps/v1", Namespace: "default"}, Index: 1}, agv1alpha1.AppGroupTopologyInfo{Workload: agv1alpha1.AppGroupWorkloadInfo{Kind: "Deployment", Name: "p10", Selector: "p10", APIVersion: "apps/v1", Namespace: "default"}, Index: 2}, diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go new file mode 100644 index 000000000..e9bb0efe7 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go @@ -0,0 +1,1385 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. 
+// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. 
+// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +var ds = sync.Pool{ + New: func() any { + return new(decodeState) + }, +} + +func UnmarshalWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return nil, err + } + + d.init(data) + err = d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +func UnmarshalValid(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + return d.unmarshal(v) +} + +func UnmarshalValidWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + err := d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. 
+type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Pointer { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v any) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + lastKeys []string +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. 
+func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. +func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. 
+ start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() any { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Pointer { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. 
+ u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. 
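+		// For example, map[string]T and map[int64]T are accepted directly, as is
+		// any key type whose pointer implements encoding.TextUnmarshaler; other
+		// key kinds fail below with an UnmarshalTypeError.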
+ switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } + + var keys []string + + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + keys = append(keys, string(key)) + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + if i, ok := fields.nameIndex[string(key)]; ok { + // Found an exact name match. + f = &fields.list[i] + } else { + // Fall back to the expensive case-insensitive + // linear search. + for i := range fields.list { + ff := &fields.list[i] + if ff.equalFold(ff.nameBytes, key) { + f = ff + break + } + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Pointer { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. + subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) + d.errorContext.Struct = t + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. 
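+		// After the ':' is consumed, the value is decoded normally, except for
+		// fields carrying the ",string" option (destring), whose quoted literal
+		// is unwrapped first via valueQuoted.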
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + switch { + case reflect.PointerTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } + } + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + + if v.Kind() == reflect.Map { + d.lastKeys = keys + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (any, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. 
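+	// item holds the raw literal bytes (e.g. `"hi"`, `42`, `true`, `null`).
+	// An empty item can only arrive from a ",string" field whose value was the
+	// empty string; that case is rejected just below.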
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + if v.Type() == numberType && !isValidNumber(string(s)) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' 
|| c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. + v.SetString(s) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val any) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []any { + var v = make([]any, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]any { + m := make(map[string]any) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. 
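+		// Input was already validated, so anything but ':' here means the decoder
+		// and scanner are out of sync, which is treated as an internal panic.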
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() any { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. 
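+					// (For example, a lone `\ud83d` that is not followed by a low
+					// surrogate cannot be combined into a rune and becomes U+FFFD.)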
+ rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go new file mode 100644 index 000000000..2e6eca448 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go @@ -0,0 +1,1486 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// So that the JSON will be safe to embed inside HTML