From ebd0aa011a8cfbfabbcf1969b119daf4d71cfc11 Mon Sep 17 00:00:00 2001 From: l-technicore Date: Thu, 28 Nov 2024 10:08:15 +0530 Subject: [PATCH] Release 1.29.1 Security patching, updated Makefile, Readme and Third Party Licences --- .github/workflows/makefile.yml | 11 +- .github/workflows/release.yaml | 4 +- Dockerfile | 4 +- Dockerfile_arm_all | 4 +- Makefile | 2 +- README.md | 8 +- THIRD_PARTY_LICENSES.txt | 12 +- go.mod | 84 ++--- go.sum | 76 ++-- .../oci-cloud-controller-manager.yaml | 2 +- .../oci-csi-controller-driver.yaml | 6 +- .../oci-csi-node-driver.yaml | 6 +- .../oci-csi-node-rbac.yaml | 3 + .../oci-flexvolume-driver.yaml | 4 +- .../oci-volume-provisioner-fss.yaml | 2 +- .../oci-volume-provisioner.yaml | 2 +- pkg/cloudprovider/providers/oci/instances.go | 2 +- pkg/oci/client/client.go | 72 +--- pkg/oci/client/client_factory.go | 4 +- pkg/oci/client/identity.go | 18 + pkg/volume/provisioner/core/provisioner.go | 2 +- test/e2e/cloud-provider-oci/fss_dynamic.go | 32 +- .../v65/common/auth/federation_client.go | 14 +- .../oci-go-sdk/v65/common/circuit_breaker.go | 27 +- .../oracle/oci-go-sdk/v65/common/regions.go | 18 + .../oracle/oci-go-sdk/v65/common/regions.json | 24 ++ .../oracle/oci-go-sdk/v65/common/version.go | 2 +- .../containerengine/install_addon_details.go | 3 + .../loadbalancer/create_listener_details.go | 5 +- .../oci-go-sdk/v65/loadbalancer/listener.go | 5 +- .../v65/loadbalancer/listener_details.go | 5 +- .../loadbalancer/update_listener_details.go | 5 +- .../v65/networkloadbalancer/backend_set.go | 1 - .../backend_set_details.go | 2 - .../backend_set_summary.go | 6 +- .../create_listener_details.go | 4 + .../v65/networkloadbalancer/listener.go | 4 + .../networkloadbalancer/listener_details.go | 4 + .../networkloadbalancer/listener_protocols.go | 4 + .../networkloadbalancer/listener_summary.go | 4 + ...network_load_balancers_protocol_summary.go | 4 + .../update_listener_details.go | 4 + .../otel/metric/noop/noop.go | 264 ++++++++++++++ vendor/golang.org/x/net/http2/frame.go | 31 ++ vendor/golang.org/x/net/http2/pipe.go | 11 +- vendor/golang.org/x/net/http2/server.go | 13 +- vendor/golang.org/x/net/http2/testsync.go | 331 ++++++++++++++++++ vendor/golang.org/x/net/http2/transport.go | 307 ++++++++++++---- vendor/golang.org/x/net/websocket/client.go | 55 ++- vendor/golang.org/x/net/websocket/dial.go | 11 +- .../admissionregistration/v1alpha1/types.go | 4 +- .../admissionregistration/v1beta1/types.go | 4 +- vendor/k8s.io/api/batch/v1/types.go | 6 +- vendor/k8s.io/api/core/v1/generated.proto | 2 +- vendor/k8s.io/api/core/v1/types.go | 2 +- .../core/v1/types_swagger_doc_generated.go | 2 +- .../pkg/util/httpstream/wsstream/conn.go | 2 +- .../controller_reconcile.go | 5 +- .../apiserver/pkg/features/kube_features.go | 17 + vendor/k8s.io/apiserver/pkg/server/config.go | 2 +- .../apiserver/pkg/server/options/tracing.go | 8 + .../apiserver/pkg/storage/cacher/cacher.go | 24 +- .../pkg/storage/cacher/lister_watcher.go | 30 +- .../pkg/storage/cacher/watch_cache.go | 10 +- .../storage/cacher/watch_cache_interval.go | 17 + .../pkg/storage/cacher/watch_progress.go | 9 +- .../pkg/storage/etcd3/metrics/metrics.go | 12 +- .../tools/remotecommand/websocket.go | 23 +- .../metrics/prometheus/slis/metrics.go | 1 + .../k8s.io/kubernetes/pkg/apis/core/types.go | 2 +- .../pkg/apis/core/validation/validation.go | 42 ++- .../kubernetes/pkg/features/kube_features.go | 5 + .../pkg/scheduler/framework/interface.go | 5 +- .../pkg/scheduler/framework/types.go | 8 + 
.../pkg/scheduler/metrics/metrics.go | 2 +- .../pkg/util/filesystem/defaultfs.go | 5 +- .../pkg/util/filesystem/util_unix.go | 10 + .../pkg/util/filesystem/util_windows.go | 156 +++++++++ .../k8s.io/kubernetes/pkg/volume/plugins.go | 2 +- .../kubernetes/pkg/volume/util/types/types.go | 17 + .../scheduling/nvidia-driver-installer.yaml | 69 +++- .../storage-csi/external-attacher/rbac.yaml | 4 +- .../rbac.yaml | 4 +- .../external-provisioner/rbac.yaml | 14 +- .../storage-csi/external-resizer/rbac.yaml | 10 +- .../csi-snapshotter/rbac-csi-snapshotter.yaml | 20 +- .../storage-csi/hostpath/README.md | 2 +- .../hostpath/csi-hostpath-driverinfo.yaml | 3 + .../hostpath/csi-hostpath-plugin.yaml | 20 +- .../hostpath/csi-hostpath-testing.yaml | 6 +- .../mock/csi-mock-driver-attacher.yaml | 2 +- .../mock/csi-mock-driver-resizer.yaml | 2 +- .../mock/csi-mock-driver-snapshotter.yaml | 2 +- .../storage-csi/mock/csi-mock-driver.yaml | 6 +- .../storage-csi/mock/csi-mock-proxy.yaml | 6 +- .../storage-csi/update-hostpath.sh | 2 +- .../kubernetes/test/utils/image/manifest.go | 6 +- .../k8s.io/mount-utils/mount_helper_unix.go | 8 +- vendor/modules.txt | 87 +++-- 99 files changed, 1790 insertions(+), 445 deletions(-) create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/noop.go create mode 100644 vendor/golang.org/x/net/http2/testsync.go diff --git a/.github/workflows/makefile.yml b/.github/workflows/makefile.yml index ad30f6ef3c..29eacacbde 100644 --- a/.github/workflows/makefile.yml +++ b/.github/workflows/makefile.yml @@ -12,18 +12,21 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.21' id: go - name: Check out code into the Go module directory uses: actions/checkout@v2 - + - name: Install dependencies + run: | + go mod download - name: Run Unit Tests run: | go test -covermode=count -coverprofile=profile.cov ./pkg/... 
+ - name: Install goveralls + run: go install github.com/mattn/goveralls@latest - name: Send coverage env: COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - GO111MODULE=off go get github.com/mattn/goveralls - $(go env GOPATH)/bin/goveralls -coverprofile=profile.cov -service=github + goveralls -coverprofile=profile.cov -service=github diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 56591c42cb..3e569eaf20 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -26,7 +26,7 @@ jobs: run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${GITHUB_ACTOR,,} --password-stdin - name: Build Image - run: OSS_REGISTRY="ghcr.io/oracle" VERSION="${{ github.ref_name }}" make image + run: OSS_REGISTRY="ghcr.io/${GITHUB_REPOSITORY_OWNER,,}" VERSION="${{ github.ref_name }}" make image - name: Push Image - run: OSS_REGISTRY="ghcr.io/oracle" VERSION="${{ github.ref_name }}" make docker-push-all + run: OSS_REGISTRY="ghcr.io/${GITHUB_REPOSITORY_OWNER,,}" VERSION="${{ github.ref_name }}" make docker-push-all diff --git a/Dockerfile b/Dockerfile index 22522e500d..50649c3bfe 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,7 +27,7 @@ WORKDIR $SRC RUN COMPONENT=${COMPONENT} make clean build -FROM oraclelinux:8-slim +FROM ghcr.io/oracle/oraclelinux:8-slim-fips COPY --from=0 /go/src/github.com/oracle/oci-cloud-controller-manager/dist/* /usr/local/bin/ COPY --from=0 /go/src/github.com/oracle/oci-cloud-controller-manager/image/* /usr/local/bin/ @@ -45,4 +45,4 @@ RUN chmod 755 /sbin/encrypt-umount RUN chmod 755 /sbin/rpm-host RUN chmod 755 /sbin/chroot-bash -COPY --from=0 /go/src/github.com/oracle/oci-cloud-controller-manager/dist/* /usr/local/bin/ \ No newline at end of file +COPY --from=0 /go/src/github.com/oracle/oci-cloud-controller-manager/dist/* /usr/local/bin/ diff --git a/Dockerfile_arm_all b/Dockerfile_arm_all index 1bdf8f9cf8..3739e3a2b0 100644 --- a/Dockerfile_arm_all +++ b/Dockerfile_arm_all @@ -14,7 +14,7 @@ WORKDIR $SRC RUN ARCH=arm make clean build-arm-all -FROM arm64v8/oraclelinux:8-slim +FROM ghcr.io/oracle/oraclelinux:8-slim-fips-arm64v8 RUN microdnf -y install util-linux e2fsprogs xfsprogs python2 && \ microdnf update && \ @@ -29,4 +29,4 @@ RUN chmod 755 /sbin/encrypt-umount RUN chmod 755 /sbin/rpm-host RUN chmod 755 /sbin/chroot-bash -COPY --from=0 /go/src/github.com/oracle/oci-cloud-controller-manager/dist/arm/* /usr/local/bin/ \ No newline at end of file +COPY --from=0 /go/src/github.com/oracle/oci-cloud-controller-manager/dist/arm/* /usr/local/bin/ diff --git a/Makefile b/Makefile index 676b96c54f..dc786498c9 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ else VERSION ?= ${VERSION} endif -RELEASE = v1.29.0 +RELEASE = v1.29.1 GOOS ?= linux ARCH ?= amd64 diff --git a/README.md b/README.md index 75586cebf2..9babc62253 100644 --- a/README.md +++ b/README.md @@ -33,13 +33,13 @@ cloud-provider specific code out of the Kubernetes codebase. | v1.25.2 | v1.25 | - | | v1.26.4 | v1.26 | - | | v1.27.3 | v1.27 | - | -| v1.28.1 | v1.28 | - | -| v1.29.0 | v1.29 | - | - +| v1.28.2 | v1.28 | - | +| v1.29.1 | v1.29 | - | +| v1.30.0 | v1.30 | - | Note: -Versions older than v1.27.3 are no longer supported, new features / bug fixes will be available in v1.27.3 and later. +Versions older than v1.28.2 are no longer supported, new features / bug fixes will be available in v1.28.2 and later. 
## Implementation Currently `oci-cloud-controller-manager` implements: diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt index e38c13036a..d7acf0d183 100644 --- a/THIRD_PARTY_LICENSES.txt +++ b/THIRD_PARTY_LICENSES.txt @@ -1179,7 +1179,6 @@ SPDX:MIT == Copyright Copyright (c) 2013-2014 Onsi Fakhouri -Copyright (c) 2014 Amit Kumar Gupta --------------------------------- (separator) ---------------------------------- @@ -1312,16 +1311,12 @@ END OF TERMS AND CONDITIONS == Copyright -Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. -Copyright (c) 2013 The Go Authors. All rights reserved. Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2018, 2024, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2023 Oracle and/or its affiliates. Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. -Copyright © 2012-2020 Mat Ryer, Tyler Bunnell and contributors. -Copyright © 2013 The Go Authors. All rights reserved. == Notices Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. @@ -2027,7 +2022,6 @@ go.uber.org/zap SPDX:MIT == Copyright -Copyright (c) "*" Uber Technologies, Inc.") Copyright (c) 2016 Uber Technologies, Inc. Copyright (c) 2016, 2017 Uber Technologies, Inc. Copyright (c) 2016-2017 Uber Technologies, Inc. @@ -2170,6 +2164,7 @@ Copyright 2020 The Go Authors. All rights reserved. Copyright 2021 The Go Authors. All rights reserved. Copyright 2022 The Go Authors. All rights reserved. Copyright 2023 The Go Authors. All rights reserved. +Copyright 2024 The Go Authors. All rights reserved. == Patents Additional IP Rights Grant (Patents) @@ -3067,6 +3062,7 @@ Copyright 2020 The Kubernetes Authors. Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors. Copyright 2023 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. --------------------------------- (separator) ---------------------------------- @@ -4324,5 +4320,5 @@ the Mozilla Public License, v. 2.0. 
=== ATTRIBUTION-HELPER-GENERATED: -=== Attribution helper version: {Major:0 Minor:11 GitVersion: GitCommit: GitTreeState:dirty BuildDate:1970-01-01T00:00:00Z GoVersion:go1.19.3 Compiler:gc Platform:darwin/arm64} -=== License file based on go.mod with md5 sum: 5ba4389f9d7406b21218d714f3f79d86 +=== Attribution helper version: {Major:0 Minor:11 GitVersion:0.10.0-114-g3747dab9 GitCommit:3747dab92eb29c0dbe6409ffbb824b9ae3a04b87 GitTreeState:dirty BuildDate:2024-02-28T16:52:52Z GoVersion:go1.21.0 Compiler:gc Platform:darwin/amd64} +=== License file based on go.mod with md5 sum: ea9bd465882b674ff1025b18349e102f diff --git a/go.mod b/go.mod index bad8ea59a6..cf24bb9ccc 100644 --- a/go.mod +++ b/go.mod @@ -6,35 +6,27 @@ replace ( github.com/docker/docker => github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0 google.golang.org/grpc => google.golang.org/grpc v1.60.1 - k8s.io/api => k8s.io/api v0.29.1 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.1 - k8s.io/apimachinery => k8s.io/apimachinery v0.29.1 - k8s.io/apiserver => k8s.io/apiserver v0.29.1 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.1 - k8s.io/client-go => k8s.io/client-go v0.29.1 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.1 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.1 - k8s.io/code-generator => k8s.io/code-generator v0.29.1 - k8s.io/component-base => k8s.io/component-base v0.29.1 - k8s.io/component-helpers => k8s.io/component-helpers v0.29.1 - k8s.io/controller-manager => k8s.io/controller-manager v0.29.1 - k8s.io/cri-api => k8s.io/cri-api v0.29.1 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.1 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.1 + k8s.io/api => k8s.io/api v0.29.11 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.11 + k8s.io/apimachinery => k8s.io/apimachinery v0.29.11 + k8s.io/apiserver => k8s.io/apiserver v0.29.11 + k8s.io/client-go => k8s.io/client-go v0.29.11 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.11 + k8s.io/component-base => k8s.io/component-base v0.29.11 + k8s.io/component-helpers => k8s.io/component-helpers v0.29.11 + k8s.io/controller-manager => k8s.io/controller-manager v0.29.11 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.11 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.11 k8s.io/endpointslice => k8s.io/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20230810203337-add7e14df11e - k8s.io/kms => k8s.io/kms v0.29.1 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.1 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.1 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.1 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.1 - k8s.io/kubectl => k8s.io/kubectl v0.29.1 - k8s.io/kubelet => k8s.io/kubelet v0.29.1 - k8s.io/kubernetes => k8s.io/kubernetes v1.29.1 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.1 - k8s.io/metrics => k8s.io/metrics v0.29.1 - k8s.io/mount-utils => k8s.io/mount-utils v0.29.1 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.1 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.1 + k8s.io/kms => k8s.io/kms v0.29.11 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.11 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.11 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.11 + k8s.io/kube-scheduler => 
k8s.io/kube-scheduler v0.29.11 + k8s.io/kubectl => k8s.io/kubectl v0.29.11 + k8s.io/kubelet => k8s.io/kubelet v0.29.11 + k8s.io/kubernetes => k8s.io/kubernetes v1.29.11 + k8s.io/mount-utils => k8s.io/mount-utils v0.29.11 ) require ( @@ -42,35 +34,35 @@ require ( github.com/go-logr/zapr v1.2.4 // indirect github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.17.0 - github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.4 + github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.30.0 - github.com/oracle/oci-go-sdk/v65 v65.77.0 + github.com/oracle/oci-go-sdk/v65 v65.79.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.17.0 github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.8.1 go.uber.org/zap v1.26.0 - golang.org/x/net v0.21.0 + golang.org/x/net v0.23.0 golang.org/x/sys v0.18.0 // indirect google.golang.org/grpc v1.60.1 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/apiserver v0.29.1 // indirect + k8s.io/api v0.29.11 + k8s.io/apimachinery v0.29.11 + k8s.io/apiserver v0.29.11 // indirect k8s.io/client-go v1.5.2 - k8s.io/cloud-provider v0.29.1 - k8s.io/component-base v0.29.1 - k8s.io/component-helpers v0.29.1 - k8s.io/controller-manager v0.29.1 // indirect - k8s.io/csi-translation-lib v0.29.1 // indirect + k8s.io/cloud-provider v0.29.11 + k8s.io/component-base v0.29.11 + k8s.io/component-helpers v0.29.11 + k8s.io/controller-manager v0.29.11 // indirect + k8s.io/csi-translation-lib v0.29.11 // indirect k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.110.1 - k8s.io/kubelet v0.29.1 // indirect - k8s.io/kubernetes v1.29.1 - k8s.io/mount-utils v0.29.1 + k8s.io/kubelet v0.29.11 // indirect + k8s.io/kubernetes v1.29.11 + k8s.io/mount-utils v0.29.11 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/sig-storage-lib-external-provisioner/v9 v9.1.0-rc.0 ) @@ -80,7 +72,7 @@ require ( golang.org/x/sync v0.5.0 google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/apiextensions-apiserver v0.29.1 + k8s.io/apiextensions-apiserver v0.29.11 ) require ( @@ -180,11 +172,11 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - k8s.io/kms v0.29.1 // indirect + k8s.io/kms v0.29.11 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/kube-scheduler v0.0.0 // indirect - k8s.io/kubectl v0.29.1 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.3 // indirect + k8s.io/kubectl v0.29.11 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index 30450cc136..6c6f6097d8 100644 --- a/go.sum +++ b/go.sum @@ -1205,8 +1205,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/oracle/oci-go-sdk/v65 v65.77.0 h1:lFdbyJq7/VsB9gAIemTGx/8sSK3uYEKlQTf5FaTnpTs= -github.com/oracle/oci-go-sdk/v65 
v65.77.0/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0= +github.com/oracle/oci-go-sdk/v65 v65.79.0 h1:Tv9L1XTKWkdXtSViMbP+dA93WunquvW++/2s5pOvOgU= +github.com/oracle/oci-go-sdk/v65 v65.79.0/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -1511,8 +1511,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2081,44 +2081,44 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g= -k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -k8s.io/cloud-provider v0.29.1 h1:bDLpOSpysWrtU2PCkvyP2sUTwRBa6MGCmxt68CRRW/8= -k8s.io/cloud-provider v0.29.1/go.mod h1:u50Drm6AbuoKpsVbAstNiFHGgbSVHuJV4TWN5imdM2w= -k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= -k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= -k8s.io/component-helpers v0.29.1 h1:54MMEDu6xeJmMtAKztsPwu0kJKr4+jCUzaEIn2UXRoc= -k8s.io/component-helpers v0.29.1/go.mod h1:+I7xz4kfUgxWAPJIVKrqe4ml4rb9UGpazlOmhXYo+cY= -k8s.io/controller-manager v0.29.1 h1:bTnJFF/OWooRVeJ4QLA1ApuPH+fjHSmcVMMeL7qvI2E= -k8s.io/controller-manager v0.29.1/go.mod h1:fVhGGuBiB0B2yT2+OHXZaA88owVn5zkv18A+G9E9Qlw= -k8s.io/csi-translation-lib v0.29.1 h1:b2tYZnnHyrQVHG6GYel7egmVvKeIlX/xbTNm9ynBSUg= -k8s.io/csi-translation-lib v0.29.1/go.mod h1:Zglui6PgFSew8ux50djwZ3PFK6eNrWktid66D7pHDDo= +k8s.io/api 
v0.29.11 h1:6FwDo33f1WX5Yu0RQTX9YAd3wth8Ik0B4SXQKsoQfbk= +k8s.io/api v0.29.11/go.mod h1:3TDAW1OpFbz/Yx5r0W06b6eiAfHEwtH61VYDzpTU4Ng= +k8s.io/apiextensions-apiserver v0.29.11 h1:ytJJQ8EK0GzPa80tnPkfDoBGoNPMwqfaSWwg4FKmbEU= +k8s.io/apiextensions-apiserver v0.29.11/go.mod h1:eqKsza/nErdFNltXoVZmRt9vX99ooDLDMTcIcOG0ueg= +k8s.io/apimachinery v0.29.11 h1:55+6ue9advpA7T0sX2ZJDHCLKuiFfrAAR/39VQN9KEQ= +k8s.io/apimachinery v0.29.11/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apiserver v0.29.11 h1:EXcv4/3iIKWG5tWI2ywdMY86jpxYw6WDAdMrBKUMkSc= +k8s.io/apiserver v0.29.11/go.mod h1:lnoWXh0J75ei1h4/F5ZbOEd7byiAVasherLB6Snzl/I= +k8s.io/client-go v0.29.11 h1:mBX7Ub0uqpLMwWz3J/AGS/xKOZsjr349qZ1vxVoL1l8= +k8s.io/client-go v0.29.11/go.mod h1:WOEoi/eLg2YEg3/yEd7YK3CNScYkM8AEScQadxUnaTE= +k8s.io/cloud-provider v0.29.11 h1:TrpRhwL1zJupJDi9mXRxUNvqkHm03U7RoQpFA7SLwc4= +k8s.io/cloud-provider v0.29.11/go.mod h1:MTGplcJnpST/nGFbhb5zGhdI23wGjEEaEi9G+17qgPk= +k8s.io/component-base v0.29.11 h1:H3GJIyDNPrscvXGP6wx+9gApcwwmrUd0YtCGp5BcHBA= +k8s.io/component-base v0.29.11/go.mod h1:0qu1WStER4wu5o8RMRndZUWPVcPH1XBy/QQiDcD6lew= +k8s.io/component-helpers v0.29.11 h1:GdZaSLBLlCa+EzjAnpZ4fGB75rA3qqPLLZKk+CsqNyo= +k8s.io/component-helpers v0.29.11/go.mod h1:gloyih9IiE4Qy/7iLUXqAmxYSUduuIpMCiNYuHfYvD4= +k8s.io/controller-manager v0.29.11 h1:gJkqYKh56fKnZz6lYXByPdMvxYynBukePXm1vpMsVLg= +k8s.io/controller-manager v0.29.11/go.mod h1:K45e2ADRO3o4RD5LaefDS/h/VwwG6uJkTa3mEiChe/4= +k8s.io/csi-translation-lib v0.29.11 h1:G2tMJmh0VgDs93wS3sBKm1+igP/9A2asUs+VKF+lfNg= +k8s.io/csi-translation-lib v0.29.11/go.mod h1:lJX2ZB89tPHQV2ITLxpkYWyfrHv5oRQf+xzjwkBNwxU= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kms v0.29.1 h1:6dMOaxllwiAZ8p3Hys65b78MDG+hONpBBpk1rQsaEtk= -k8s.io/kms v0.29.1/go.mod h1:Hqkx3zEGWThUTbcSkK508DUv4c1HOJOB5qihSoLBWgU= +k8s.io/kms v0.29.11 h1:pylaiDJhgfqczvcjMDPI89+VH0OVoGQhscPH1VbBzQE= +k8s.io/kms v0.29.11/go.mod h1:vWVImKkJd+1BQY4tBwdfSwjQBiLrnbNtHADcDEDQFtk= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kube-scheduler v0.29.1 h1:EKhEBriMl5t/NVjPjUr4he11ghe5BZocur49NOXIrWk= -k8s.io/kube-scheduler v0.29.1/go.mod h1:MQhjK51HUNq0WQ2z+qRWgEnDwD7/XQm3y9XfvrNSmek= -k8s.io/kubectl v0.29.1 h1:rWnW3hi/rEUvvg7jp4iYB68qW5un/urKbv7fu3Vj0/s= -k8s.io/kubectl v0.29.1/go.mod h1:SZzvLqtuOJYSvZzPZR9weSuP0wDQ+N37CENJf0FhDF4= -k8s.io/kubelet v0.29.1 h1:cso8Dk8dymkj8q+EvW/aCbIYU2aOkH27gho48tYza/8= -k8s.io/kubelet v0.29.1/go.mod h1:hTl/naFcCVG1Ku17fMgj/krbheBwBkf3gnFhaboMx7E= -k8s.io/kubernetes v1.29.1 h1:fxJFVb8uqbYZDYHpwIsAndBQs360cQGb0xa1gYFh3fo= -k8s.io/kubernetes v1.29.1/go.mod h1:xZPKU0yO0CBbLTnbd+XGyRmmtmaVuJykDb8gNCkeeUE= -k8s.io/mount-utils v0.29.1 h1:veXlIm52Y4tm3H0pG03cOdkw0KOJxYDa0fQqhJCoqvQ= -k8s.io/mount-utils v0.29.1/go.mod h1:9IWJTMe8tG0MYMLEp60xK9GYVeCdA3g4LowmnVi+t9Y= +k8s.io/kube-scheduler v0.29.11 h1:smH2FQSEj5tEJgOX6RSuKZZHEC+6Lmruhr/ipEBCgRU= +k8s.io/kube-scheduler v0.29.11/go.mod h1:ALv5HFRjvFwIvsLejKfJXDYyWxYn6d5C2xYzVRKHMSc= +k8s.io/kubectl v0.29.11 h1:rxflwYQ1kmeEUVPWNevKLTtWjNfLrFSzLRZJoPolguU= +k8s.io/kubectl v0.29.11/go.mod h1:b6IhZyA/zp7q6kbiYfm5B3xwVPodVUvpfN6VG0LwA30= 
+k8s.io/kubelet v0.29.11 h1:V06Kr+CO5m6t4fdK+ZUfGBGB+9n5EE4DyMVflaRV34Q= +k8s.io/kubelet v0.29.11/go.mod h1:PCUTi0GGtVmewNoFeUiM5IGv3BKSNcuFogaMxiKEWmg= +k8s.io/kubernetes v1.29.11 h1:8JR33gU38Zq1FS7GvOOxDEgwR+OfjI5umSitd7aAqak= +k8s.io/kubernetes v1.29.11/go.mod h1:L6/pfKQZ6Tv2O8gyT4OxhGZp+nNsjV54xtNodRoup9k= +k8s.io/mount-utils v0.29.11 h1:8CG16bFCo7uhHj1HrQAzGCUzdVrLiWdZKQAxAUDMteQ= +k8s.io/mount-utils v0.29.11/go.mod h1:SHUMR9n3b6tLgEmlyT36cL6fV6Sjwa5CJhc0guCXvb0= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= @@ -2176,8 +2176,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.3 h1:IYXtJZpv6oAlx8Als8uIkxq2P3BlvqQfS8dt65obcco= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.3/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/sig-storage-lib-external-provisioner/v9 v9.1.0-rc.0 h1:0aLQSafwBXlXTPiA9wJK5wEDsgYUh5uJlKHpBw9dwCk= diff --git a/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml b/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml index 2fe6246b62..817dc97d19 100644 --- a/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml +++ b/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml @@ -42,7 +42,7 @@ spec: path: /etc/kubernetes containers: - name: oci-cloud-controller-manager - image: ghcr.io/oracle/cloud-provider-oci:v1.29.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.29.1 command: ["/usr/local/bin/oci-cloud-controller-manager"] args: - --cloud-config=/etc/oci/cloud-provider.yaml diff --git a/manifests/container-storage-interface/oci-csi-controller-driver.yaml b/manifests/container-storage-interface/oci-csi-controller-driver.yaml index fda4d5254b..0106bc9e02 100644 --- a/manifests/container-storage-interface/oci-csi-controller-driver.yaml +++ b/manifests/container-storage-interface/oci-csi-controller-driver.yaml @@ -74,7 +74,7 @@ spec: - mountPath: /var/run/shared-tmpfs name: shared-tmpfs - name: snapshot-controller - image: registry.k8s.io/sig-storage/snapshot-controller:v6.2.0 + image: registry.k8s.io/sig-storage/snapshot-controller:v6.3.0 args: - --leader-election imagePullPolicy: "IfNotPresent" @@ -82,7 +82,7 @@ spec: - mountPath: /var/run/shared-tmpfs name: shared-tmpfs - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.3.0 args: - --csi-address=/var/run/shared-tmpfs/csi.sock - --leader-election @@ -96,7 +96,7 @@ spec: - --fss-csi-endpoint=unix://var/run/shared-tmpfs/csi-fss.sock command: - 
/usr/local/bin/oci-csi-controller-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.29.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.29.1 imagePullPolicy: IfNotPresent volumeMounts: - name: config diff --git a/manifests/container-storage-interface/oci-csi-node-driver.yaml b/manifests/container-storage-interface/oci-csi-node-driver.yaml index 79c0342020..a57e879d43 100644 --- a/manifests/container-storage-interface/oci-csi-node-driver.yaml +++ b/manifests/container-storage-interface/oci-csi-node-driver.yaml @@ -117,7 +117,7 @@ spec: fieldPath: spec.nodeName - name: PATH value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/host/usr/bin:/host/sbin - image: ghcr.io/oracle/cloud-provider-oci:v1.29.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.29.1 securityContext: privileged: true volumeMounts: @@ -152,7 +152,7 @@ spec: args: - --csi-address=/csi/csi.sock - --kubelet-registration-path=/var/lib/kubelet/plugins/blockvolume.csi.oraclecloud.com/csi.sock - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0 securityContext: privileged: true lifecycle: @@ -171,7 +171,7 @@ spec: args: - --csi-address=/fss/csi.sock - --kubelet-registration-path=/var/lib/kubelet/plugins/fss.csi.oraclecloud.com/csi.sock - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.0 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0 securityContext: privileged: true lifecycle: diff --git a/manifests/container-storage-interface/oci-csi-node-rbac.yaml b/manifests/container-storage-interface/oci-csi-node-rbac.yaml index 36de8dcd7a..cc9ab5ef0f 100644 --- a/manifests/container-storage-interface/oci-csi-node-rbac.yaml +++ b/manifests/container-storage-interface/oci-csi-node-rbac.yaml @@ -56,6 +56,9 @@ rules: - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots/status" ] verbs: [ "update", "patch" ] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create"] --- kind: ClusterRoleBinding diff --git a/manifests/flexvolume-driver/oci-flexvolume-driver.yaml b/manifests/flexvolume-driver/oci-flexvolume-driver.yaml index 46209b0a69..d7dcf41c5d 100644 --- a/manifests/flexvolume-driver/oci-flexvolume-driver.yaml +++ b/manifests/flexvolume-driver/oci-flexvolume-driver.yaml @@ -40,7 +40,7 @@ spec: secretName: oci-flexvolume-driver containers: - name: oci-flexvolume-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.29.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.29.1 command: ["/usr/local/bin/install.py", "-c", "/tmp/config.yaml"] securityContext: privileged: true @@ -76,7 +76,7 @@ spec: type: DirectoryOrCreate containers: - name: oci-flexvolume-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.29.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.29.1 command: ["/usr/local/bin/install.py"] securityContext: privileged: true diff --git a/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml b/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml index 613fbd2ce3..1c8d899700 100644 --- a/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml +++ b/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml @@ -35,7 +35,7 @@ spec: secretName: oci-volume-provisioner containers: - name: oci-volume-provisioner - image: ghcr.io/oracle/cloud-provider-oci:v1.29.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.29.1 command: ["/usr/local/bin/oci-volume-provisioner"] env: - name: NODE_NAME diff --git 
a/manifests/volume-provisioner/oci-volume-provisioner.yaml b/manifests/volume-provisioner/oci-volume-provisioner.yaml index 7c754a50f5..ff39b7e6ce 100644 --- a/manifests/volume-provisioner/oci-volume-provisioner.yaml +++ b/manifests/volume-provisioner/oci-volume-provisioner.yaml @@ -35,7 +35,7 @@ spec: secretName: oci-volume-provisioner containers: - name: oci-volume-provisioner - image: ghcr.io/oracle/cloud-provider-oci:v1.29.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.29.1 command: ["/usr/local/bin/oci-volume-provisioner"] env: - name: NODE_NAME diff --git a/pkg/cloudprovider/providers/oci/instances.go b/pkg/cloudprovider/providers/oci/instances.go index 7c5100426e..e3540e9b97 100644 --- a/pkg/cloudprovider/providers/oci/instances.go +++ b/pkg/cloudprovider/providers/oci/instances.go @@ -376,7 +376,7 @@ func (cp *CloudProvider) checkForAuthorizationError(ctx context.Context, instanc return false, err } // to eliminate AD specific issues, list all ADs and make AD specific requests - availabilityDomains, err := cp.client.Identity().ListAvailabilityDomains(ctx, compartmentId) + availabilityDomains, err := cp.client.Identity(nil).ListAvailabilityDomains(ctx, compartmentId) for _, availabilityDomain := range availabilityDomains { instances, err := cp.client.Compute().ListInstancesByCompartmentAndAD(ctx, compartmentId, *availabilityDomain.Name) // if we are getting errors for ListInstances the issue can be authorization or other issues diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index 50ff63e3bf..405f96ed1b 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -16,8 +16,6 @@ package client import ( "context" - "os" - "strings" "time" "github.com/oracle/oci-go-sdk/v65/common" @@ -195,40 +193,6 @@ type client struct { logger *zap.SugaredLogger } -func setupBaseClient(log *zap.SugaredLogger, client *common.BaseClient, signer common.HTTPRequestSigner, interceptor common.RequestInterceptor, endpointOverrideEnvVar string) { - client.Signer = signer - client.Interceptor = interceptor - if endpointOverrideEnvVar != "" { - endpointOverride, ok := os.LookupEnv(endpointOverrideEnvVar) - if ok && endpointOverride != "" { - client.Host = endpointOverride - } - } - clusterIpFamily, ok := os.LookupEnv(ClusterIpFamilyEnv) - // currently as dual stack endpoints are going to be present in selected regions, only for IPv6 single stack cluster we will be using dual stack endpoints - if ok && strings.EqualFold(clusterIpFamily, Ipv6Stack) { - // TODO: Uncomment once method is available in Public SDK - //client.EnableDualStackEndpoints(true) - - region, ok := os.LookupEnv("OCI_RESOURCE_PRINCIPAL_REGION") - if !ok { - log.Errorf("unable to get OCI_RESOURCE_PRINCIPAL_REGION env var for region") - } - - authEndpoint, ok := os.LookupEnv("OCI_SDK_AUTH_CLIENT_REGION_URL") - if !ok { - authDualStackEndpoint := common.StringToRegion(region).EndpointForTemplate("", "ds.auth.{region}.oci.{secondLevelDomain}") - if err := os.Setenv("OCI_SDK_AUTH_CLIENT_REGION_URL", authDualStackEndpoint); err != nil { - log.Errorf("unable to set OCI_SDK_AUTH_CLIENT_REGION_URL env var for oci auth dual stack endpoint") - } else { - log.Infof("OCI_SDK_AUTH_CLIENT_REGION_URL env var set to: %s", authDualStackEndpoint) - } - } else { - log.Infof("OCI_SDK_AUTH_CLIENT_REGION_URL env var set to: %s", authEndpoint) - } - } -} - // New constructs an OCI API client. 
func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimiter *RateLimiter, targetTenancyID string) (Interface, error) { @@ -237,8 +201,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewComputeClientWithConfigurationProvider") } - setupBaseClient(logger, &compute.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &compute.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring load balancer client custom transport") @@ -249,8 +211,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewVirtualNetworkClientWithConfigurationProvider") } - setupBaseClient(logger, &network.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &network.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring load balancer client custom transport") @@ -261,8 +221,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewLoadBalancerClientWithConfigurationProvider") } - setupBaseClient(logger, &lb.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &lb.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring loadbalancer client custom transport") @@ -273,8 +231,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewNetworkLoadBalancerClientWithConfigurationProvider") } - setupBaseClient(logger, &nlb.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &nlb.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring networkloadbalancer client custom transport") @@ -285,8 +241,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewIdentityClientWithConfigurationProvider") } - setupBaseClient(logger, &identity.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &identity.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring identity service client custom transport") @@ -298,7 +252,7 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit // return nil, errors.Wrap(err, "NewCompartmentsClientWithConfigurationProvider") //} // - //setupBaseClient(logger, &compartment.BaseClient, nil, nil, "") + //setupBaseClient(logger, &compartment.BaseClient, signer, interceptor, "") // //err = configureCustomTransport(logger, &compartment.BaseClient) //if err != nil { @@ -310,8 +264,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewBlockstorageClientWithConfigurationProvider") } - setupBaseClient(logger, &bs.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &bs.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring block storage service client custom transport") @@ -322,8 +274,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewFileStorageClientWithConfigurationProvider") } - setupBaseClient(logger, &fss.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &fss.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring file storage service client custom transport") @@ -394,7 +344,6 @@ func (c *client) LoadBalancer(logger *zap.SugaredLogger, lbType string, targetTe logger.Error("Failed to get new LB client with oke workload identity 
configuration provider! Error:" + err.Error()) return nil } - setupBaseClient(logger, &lb.BaseClient, nil, nil, "") err = configureCustomTransport(logger, &lb.BaseClient) if err != nil { @@ -414,7 +363,6 @@ func (c *client) LoadBalancer(logger *zap.SugaredLogger, lbType string, targetTe logger.Error("Failed to get new NLB client with oke workload identity configuration provider! Error:" + err.Error()) return nil } - setupBaseClient(logger, &nlb.BaseClient, nil, nil, "") err = configureCustomTransport(logger, &nlb.BaseClient) if err != nil { @@ -445,8 +393,6 @@ func (c *client) Networking(ociClientConfig *OCIClientConfig) NetworkingInterfac return nil } - setupBaseClient(c.logger, &network.BaseClient, nil, nil, "") - err = configureCustomTransport(c.logger, &network.BaseClient) if err != nil { c.logger.Error("Failed configure custom transport for Network Client %v", err) @@ -483,8 +429,6 @@ func (c *client) Identity(ociClientConfig *OCIClientConfig) IdentityInterface { return nil } - setupBaseClient(c.logger, &identity.BaseClient, nil, nil, "") - err = configureCustomTransport(c.logger, &identity.BaseClient) if err != nil { c.logger.Error("Failed configure custom transport for Identity Client %v", err) @@ -498,7 +442,7 @@ func (c *client) Identity(ociClientConfig *OCIClientConfig) IdentityInterface { // return nil //} // - //setupBaseClient(c.logger, &compartment.BaseClient, nil, nil, "") + //setupBaseClient(c.logger, &compartment.BaseClient, signer, interceptor, "") // //err = configureCustomTransport(c.logger, &compartment.BaseClient) //if err != nil { @@ -508,11 +452,11 @@ func (c *client) Identity(ociClientConfig *OCIClientConfig) IdentityInterface { return &client{ //compartment: &compartment, - identity: &identity, - requestMetadata: c.requestMetadata, - rateLimiter: c.rateLimiter, - subnetCache: cache.NewTTLStore(subnetCacheKeyFn, time.Duration(24)*time.Hour), - logger: c.logger, + identity: &identity, + requestMetadata: c.requestMetadata, + rateLimiter: c.rateLimiter, + subnetCache: cache.NewTTLStore(subnetCacheKeyFn, time.Duration(24)*time.Hour), + logger: c.logger, } } return c @@ -536,8 +480,6 @@ func (c *client) FSS(ociClientConfig *OCIClientConfig) FileStorageInterface { return nil } - setupBaseClient(c.logger, &fc.BaseClient, nil, nil, "") - err = configureCustomTransport(c.logger, &fc.BaseClient) if err != nil { c.logger.Errorf("Failed configure custom transport for FSS Client %v", err.Error()) diff --git a/pkg/oci/client/client_factory.go b/pkg/oci/client/client_factory.go index 678671466f..1d7410e7aa 100644 --- a/pkg/oci/client/client_factory.go +++ b/pkg/oci/client/client_factory.go @@ -19,7 +19,7 @@ import ( "go.uber.org/zap" ) -//GetClient returns the client for given Configuration +// GetClient returns the client for given Configuration func GetClient(logger *zap.SugaredLogger, cfg *config.Config) (Interface, error) { cp, err := config.NewConfigurationProvider(cfg) if err != nil { @@ -29,6 +29,6 @@ func GetClient(logger *zap.SugaredLogger, cfg *config.Config) (Interface, error) rateLimiter := NewRateLimiter(logger, cfg.RateLimiter) - c, err := New(logger, cp, &rateLimiter) + c, err := New(logger, cp, &rateLimiter, cfg.Auth.TenancyID) return c, err } diff --git a/pkg/oci/client/identity.go b/pkg/oci/client/identity.go index b71c6f9d58..d5f1a33c19 100644 --- a/pkg/oci/client/identity.go +++ b/pkg/oci/client/identity.go @@ -28,6 +28,24 @@ import ( // by the volume provisioner. 
type IdentityInterface interface { GetAvailabilityDomainByName(ctx context.Context, compartmentID, name string) (*identity.AvailabilityDomain, error) + ListAvailabilityDomains(ctx context.Context, compartmentID string) ([]identity.AvailabilityDomain, error) +} + +func (c *client) ListAvailabilityDomains(ctx context.Context, compartmentID string) ([]identity.AvailabilityDomain, error) { + if !c.rateLimiter.Reader.TryAccept() { + return nil, RateLimitError(false, "ListAvailabilityDomains") + } + + resp, err := c.identity.ListAvailabilityDomains(ctx, identity.ListAvailabilityDomainsRequest{ + CompartmentId: &compartmentID, + RequestMetadata: c.requestMetadata, + }) + incRequestCounter(err, listVerb, availabilityDomainResource) + if err != nil { + return nil, errors.WithStack(err) + } + + return resp.Items, nil } func (c *client) GetAvailabilityDomainByName(ctx context.Context, compartmentID, name string) (*identity.AvailabilityDomain, error) { diff --git a/pkg/volume/provisioner/core/provisioner.go b/pkg/volume/provisioner/core/provisioner.go index 3cc61cc119..e1bb67872e 100644 --- a/pkg/volume/provisioner/core/provisioner.go +++ b/pkg/volume/provisioner/core/provisioner.go @@ -124,7 +124,7 @@ func NewOCIProvisioner(logger *zap.SugaredLogger, kubeClient kubernetes.Interfac rateLimiter := client.NewRateLimiter(logger, cfg.RateLimiter) - client, err := client.New(logger, cp, &rateLimiter) + client, err := client.New(logger, cp, &rateLimiter, cfg.Auth.TenancyID) if err != nil { return nil, errors.Wrapf(err, "unable to construct OCI client") } diff --git a/test/e2e/cloud-provider-oci/fss_dynamic.go b/test/e2e/cloud-provider-oci/fss_dynamic.go index 4a653c8068..332daee119 100644 --- a/test/e2e/cloud-provider-oci/fss_dynamic.go +++ b/test/e2e/cloud-provider-oci/fss_dynamic.go @@ -27,7 +27,7 @@ import ( ) const ( - defaultExportOptionsJsonString = "[{\"source\":\"10.0.0.0/16\",\"requirePrivilegedSourcePort\":false,\"access\":\"READ_WRITE\",\"identitySquash\":\"NONE\",\"anonymousUid\":0,\"anonymousGid\":0},{\"source\":\"2603:c020:4015:2100::/56\",\"requirePrivilegedSourcePort\":false,\"access\":\"READ_WRITE\",\"identitySquash\":\"NONE\"},{\"source\":\"2603:c020:11:1500::/56\",\"requirePrivilegedSourcePort\":false,\"access\":\"READ_WRITE\",\"identitySquash\":\"NONE\"}]" + defaultExportOptionsJsonString = "[{\"source\":\"10.0.0.0/16\",\"requirePrivilegedSourcePort\":false,\"access\":\"READ_WRITE\",\"identitySquash\":\"NONE\",\"anonymousUid\":0,\"anonymousGid\":0}]" ) var _ = Describe("Dynamic FSS test in cluster compartment", func() { @@ -92,13 +92,16 @@ var _ = Describe("Dynamic FSS test in cluster compartment", func() { pvc := pvcJig.CreateAndAwaitPVCOrFailDynamicFSS(f.Namespace.Name, "50Gi", scName, v1.ClaimPending, nil) writePod, readPod := pvcJig.CheckSinglePodReadWrite(f.Namespace.Name, pvc.Name, false, []string{}) //adding pod deletion check as resources are being created by using workload identity resource principal - err := pvcJig.DeleteAndAwaitPod(f.Namespace.Name, writePod); if err != nil { + err := pvcJig.DeleteAndAwaitPod(f.Namespace.Name, writePod) + if err != nil { framework.Failf("Error deleting pod: %v", err) } - err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, readPod); if err != nil { + err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, readPod) + if err != nil { framework.Failf("Error deleting pod: %v", err) } - err = pvcJig.DeleteAndAwaitPVC(f.Namespace.Name, pvc.Name); if err != nil { + err = pvcJig.DeleteAndAwaitPVC(f.Namespace.Name, pvc.Name) + if err != nil { 
framework.Failf("Error deleting PVC: %v", err) } }) @@ -201,15 +204,15 @@ var _ = Describe("Dynamic FSS test in cluster compartment", func() { f.StorageClasses = append(f.StorageClasses, scName5) pvc5 := pvcJig.CreateAndAwaitPVCOrFailDynamicFSS(f.Namespace.Name, "50Gi", scName5, v1.ClaimPending, nil) writePod5, readPod5 := pvcJig.CheckSinglePodReadWrite(f.Namespace.Name, pvc5.Name, false, []string{}) - err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, writePod5); + err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, writePod5) if err != nil { framework.Failf("Error deleting pod: %v", err) } - err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, readPod5); + err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, readPod5) if err != nil { framework.Failf("Error deleting pod: %v", err) } - err = pvcJig.DeleteAndAwaitPVC(f.Namespace.Name, pvc5.Name); + err = pvcJig.DeleteAndAwaitPVC(f.Namespace.Name, pvc5.Name) if err != nil { framework.Failf("Error deleting PVC: %v", err) } @@ -293,13 +296,16 @@ var _ = Describe("Dynamic FSS test in different compartment", func() { pvc := pvcJig.CreateAndAwaitPVCOrFailDynamicFSS(f.Namespace.Name, "50Gi", scName, v1.ClaimPending, nil) writePod, readPod := pvcJig.CheckSinglePodReadWrite(f.Namespace.Name, pvc.Name, false, []string{}) //adding pod deletion check as resources are being created by using workload identity resource principal - err := pvcJig.DeleteAndAwaitPod(f.Namespace.Name, writePod); if err != nil { + err := pvcJig.DeleteAndAwaitPod(f.Namespace.Name, writePod) + if err != nil { framework.Failf("Error deleting pod: %v", err) } - err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, readPod); if err != nil { + err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, readPod) + if err != nil { framework.Failf("Error deleting pod: %v", err) } - err = pvcJig.DeleteAndAwaitPVC(f.Namespace.Name, pvc.Name); if err != nil { + err = pvcJig.DeleteAndAwaitPVC(f.Namespace.Name, pvc.Name) + if err != nil { framework.Failf("Error deleting PVC: %v", err) } }) @@ -401,15 +407,15 @@ var _ = Describe("Dynamic FSS test in different compartment", func() { f.StorageClasses = append(f.StorageClasses, scName5) pvc5 := pvcJig.CreateAndAwaitPVCOrFailDynamicFSS(f.Namespace.Name, "50Gi", scName5, v1.ClaimPending, nil) writePod5, readPod5 := pvcJig.CheckSinglePodReadWrite(f.Namespace.Name, pvc5.Name, false, []string{}) - err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, writePod5); + err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, writePod5) if err != nil { framework.Failf("Error deleting pod: %v", err) } - err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, readPod5); + err = pvcJig.DeleteAndAwaitPod(f.Namespace.Name, readPod5) if err != nil { framework.Failf("Error deleting pod: %v", err) } - err = pvcJig.DeleteAndAwaitPVC(f.Namespace.Name, pvc5.Name); + err = pvcJig.DeleteAndAwaitPVC(f.Namespace.Name, pvc5.Name) if err != nil { framework.Failf("Error deleting PVC: %v", err) } diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/common/auth/federation_client.go b/vendor/github.com/oracle/oci-go-sdk/v65/common/auth/federation_client.go index dbb74c6c00..e3805e3931 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/common/auth/federation_client.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/common/auth/federation_client.go @@ -203,6 +203,13 @@ func newAuthClient(region common.Region, provider common.KeyProvider) *common.Ba client.Host = region.Endpoint("auth") } client.BasePath = "v1/x509" + + if common.GlobalAuthClientCircuitBreakerSetting != nil { + client.Configuration.CircuitBreaker = 
common.NewCircuitBreaker(common.GlobalAuthClientCircuitBreakerSetting) + } else if !common.IsEnvVarFalse("OCI_SDK_AUTH_CLIENT_CIRCUIT_BREAKER_ENABLED") { + common.Logf("Configuring DefaultAuthClientCircuitBreakerSetting for federation client") + client.Configuration.CircuitBreaker = common.NewCircuitBreaker(common.DefaultAuthClientCircuitBreakerSetting()) + } return &client } @@ -288,7 +295,7 @@ func (c *x509FederationClient) getSecurityToken() (securityToken, error) { var httpResponse *http.Response defer common.CloseBodyIfValid(httpResponse) - for retry := 0; retry < 5; retry++ { + for retry := 0; retry < 3; retry++ { request := c.makeX509FederationRequest() if httpRequest, err = common.MakeDefaultHTTPRequestWithTaggedStruct(http.MethodPost, "", request); err != nil { @@ -298,7 +305,10 @@ func (c *x509FederationClient) getSecurityToken() (securityToken, error) { if httpResponse, err = c.authClient.Call(context.Background(), &httpRequest); err == nil { break } - + // Don't retry on 4xx errors + if httpResponse != nil && httpResponse.StatusCode >= 400 && httpResponse.StatusCode <= 499 { + return nil, fmt.Errorf("error %s returned by auth service: %s", httpResponse.Status, err.Error()) + } nextDuration := time.Duration(1000.0*(math.Pow(2.0, float64(retry)))) * time.Millisecond time.Sleep(nextDuration) } diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/common/circuit_breaker.go b/vendor/github.com/oracle/oci-go-sdk/v65/common/circuit_breaker.go index 96e9323147..b6635f0d48 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/common/circuit_breaker.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/common/circuit_breaker.go @@ -5,6 +5,7 @@ package common import ( "fmt" + "math/rand" "net/http" "os" "strconv" @@ -29,6 +30,16 @@ const ( DefaultCircuitBreakerServiceName string = "" // DefaultCircuitBreakerHistoryCount is the default count of failed response history in circuit breaker DefaultCircuitBreakerHistoryCount int = 5 + // MinAuthClientCircuitBreakerResetTimeout is the min value of openStateWindow, which is the wait time before setting the breaker to halfOpen state from open state + MinAuthClientCircuitBreakerResetTimeout = 30 + // MaxAuthClientCircuitBreakerResetTimeout is the max value of openStateWindow, which is the wait time before setting the breaker to halfOpen state from open state + MaxAuthClientCircuitBreakerResetTimeout = 49 + // AuthClientCircuitBreakerName is the default circuit breaker name for the DefaultAuthClientCircuitBreakerSetting + AuthClientCircuitBreakerName = "FederationClientCircuitBreaker" + // AuthClientCircuitBreakerDefaultFailureThreshold is the default requests failure rate for the DefaultAuthClientCircuitBreakerSetting + AuthClientCircuitBreakerDefaultFailureThreshold float64 = 0.65 + // AuthClientCircuitBreakerDefaultMinimumRequests is the default value of minimumRequests in closed status + AuthClientCircuitBreakerDefaultMinimumRequests uint32 = 3 ) // CircuitBreakerSetting wraps all exposed configurable params of circuit breaker @@ -213,7 +224,7 @@ func NewCircuitBreakerSettingWithOptions(opts ...CircuitBreakerOption) *CircuitB for _, opt := range opts { opt(cbst) } - if defaultLogger.LogLevel() == verboseLogging { + if defaultLogger != nil && defaultLogger.LogLevel() == verboseLogging { Debugf("Circuit Breaker setting: %s\n", cbst.String()) } @@ -383,3 +394,17 @@ func ConfigCircuitBreakerFromGlobalVar(baseClient *BaseClient) { baseClient.Configuration.CircuitBreaker = NewCircuitBreaker(GlobalCircuitBreakerSetting) } } + +// 
DefaultAuthClientCircuitBreakerSetting returns the default circuit breaker setting for the Auth Client +func DefaultAuthClientCircuitBreakerSetting() *CircuitBreakerSetting { + return NewCircuitBreakerSettingWithOptions( + WithOpenStateWindow(time.Duration(rand.Intn(MaxAuthClientCircuitBreakerResetTimeout+1-MinAuthClientCircuitBreakerResetTimeout)+MinAuthClientCircuitBreakerResetTimeout)*time.Second), + WithName(AuthClientCircuitBreakerName), + WithFailureRateThreshold(AuthClientCircuitBreakerDefaultFailureThreshold), + WithMinimumRequests(AuthClientCircuitBreakerDefaultMinimumRequests), + ) +} + +// GlobalAuthClientCircuitBreakerSetting is global level circuit breaker setting for the Auth Client +// than client level circuit breaker +var GlobalAuthClientCircuitBreakerSetting *CircuitBreakerSetting = nil diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/common/regions.go b/vendor/github.com/oracle/oci-go-sdk/v65/common/regions.go index 8f6bfd205d..98075ae994 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/common/regions.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/common/regions.go @@ -138,10 +138,18 @@ const ( RegionEUCrissier1 Region = "eu-crissier-1" //RegionMEAbudhabi3 region Abudhabi RegionMEAbudhabi3 Region = "me-abudhabi-3" + //RegionMEAlain1 region Alain + RegionMEAlain1 Region = "me-alain-1" //RegionMEAbudhabi2 region Abudhabi RegionMEAbudhabi2 Region = "me-abudhabi-2" //RegionMEAbudhabi4 region Abudhabi RegionMEAbudhabi4 Region = "me-abudhabi-4" + //RegionAPSeoul2 region Seoul + RegionAPSeoul2 Region = "ap-seoul-2" + //RegionAPSuwon1 region Suwon + RegionAPSuwon1 Region = "ap-suwon-1" + //RegionAPChuncheon2 region Chuncheon + RegionAPChuncheon2 Region = "ap-chuncheon-2" ) var shortNameRegion = map[string]Region{ @@ -212,8 +220,12 @@ var shortNameRegion = map[string]Region{ "avz": RegionEUDccZurich1, "avf": RegionEUCrissier1, "ahu": RegionMEAbudhabi3, + "rba": RegionMEAlain1, "rkt": RegionMEAbudhabi2, "shj": RegionMEAbudhabi4, + "dtz": RegionAPSeoul2, + "dln": RegionAPSuwon1, + "bno": RegionAPChuncheon2, } var realm = map[string]string{ @@ -233,6 +245,7 @@ var realm = map[string]string{ "oc24": "oraclecloud24.com", "oc26": "oraclecloud26.com", "oc29": "oraclecloud29.com", + "oc35": "oraclecloud35.com", } var regionRealm = map[Region]string{ @@ -317,7 +330,12 @@ var regionRealm = map[Region]string{ RegionEUCrissier1: "oc24", RegionMEAbudhabi3: "oc26", + RegionMEAlain1: "oc26", RegionMEAbudhabi2: "oc29", RegionMEAbudhabi4: "oc29", + + RegionAPSeoul2: "oc35", + RegionAPSuwon1: "oc35", + RegionAPChuncheon2: "oc35", } diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/common/regions.json b/vendor/github.com/oracle/oci-go-sdk/v65/common/regions.json index 7bd7989da2..84ffa621cd 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/common/regions.json +++ b/vendor/github.com/oracle/oci-go-sdk/v65/common/regions.json @@ -412,5 +412,29 @@ "realmKey": "oc23", "regionIdentifier": "us-thames-1", "realmDomainComponent": "oraclecloud23.com" + }, + { + "regionKey": "dtz", + "realmKey": "oc35", + "regionIdentifier": "ap-seoul-2", + "realmDomainComponent": "oraclecloud35.com" + }, + { + "regionKey": "dln", + "realmKey": "oc35", + "regionIdentifier": "ap-suwon-1", + "realmDomainComponent": "oraclecloud35.com" + }, + { + "regionKey": "bno", + "realmKey": "oc35", + "regionIdentifier": "ap-chuncheon-2", + "realmDomainComponent": "oraclecloud35.com" + }, + { + "regionKey": "rba", + "realmKey": "oc26", + "regionIdentifier": "me-alain-1", + "realmDomainComponent": "oraclecloud26.com" } ] \ 
No newline at end of file diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/common/version.go b/vendor/github.com/oracle/oci-go-sdk/v65/common/version.go index 03a12a54e6..af27c28086 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/common/version.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/common/version.go @@ -12,7 +12,7 @@ import ( const ( major = "65" - minor = "77" + minor = "79" patch = "0" tag = "" ) diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/containerengine/install_addon_details.go b/vendor/github.com/oracle/oci-go-sdk/v65/containerengine/install_addon_details.go index cd9a766908..4fe7ad4787 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/containerengine/install_addon_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/containerengine/install_addon_details.go @@ -28,6 +28,9 @@ type InstallAddonDetails struct { // Addon configuration details. Configurations []AddonConfiguration `mandatory:"false" json:"configurations"` + + // Whether or not to override an existing addon installation. Defaults to false. If set to true, any existing addon installation would be overridden as per new installation details. + IsOverrideExisting *bool `mandatory:"false" json:"isOverrideExisting"` } func (m InstallAddonDetails) String() string { diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/create_listener_details.go b/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/create_listener_details.go index 6f1d93b5ff..bea5393999 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/create_listener_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/create_listener_details.go @@ -30,9 +30,8 @@ type CreateListenerDetails struct { // Example: `80` Port *int `mandatory:"true" json:"port"` - // The protocol on which the listener accepts connection requests. - // To get a list of valid protocols, use the ListProtocols - // operation. + // The protocol on which the listener accepts connection requests. The supported protocols are HTTP, HTTP2, TCP, and GRPC. + // You can also use the ListProtocols operation to get a list of valid protocols. // Example: `HTTP` Protocol *string `mandatory:"true" json:"protocol"` diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/listener.go b/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/listener.go index ae1c79b543..1e6cddab89 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/listener.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/listener.go @@ -33,9 +33,8 @@ type Listener struct { // Example: `80` Port *int `mandatory:"true" json:"port"` - // The protocol on which the listener accepts connection requests. - // To get a list of valid protocols, use the ListProtocols - // operation. + // The protocol on which the listener accepts connection requests. The supported protocols are HTTP, HTTP2, TCP, and GRPC. + // You can also use the ListProtocols operation to get a list of valid protocols. 
// Example: `HTTP` Protocol *string `mandatory:"true" json:"protocol"` diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/listener_details.go b/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/listener_details.go index 914f4a4f59..9814c72eb9 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/listener_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/listener_details.go @@ -27,9 +27,8 @@ type ListenerDetails struct { // Example: `80` Port *int `mandatory:"true" json:"port"` - // The protocol on which the listener accepts connection requests. - // To get a list of valid protocols, use the ListProtocols - // operation. + // The protocol on which the listener accepts connection requests. The supported protocols are HTTP, HTTP2, TCP, and GRPC. + // You can also use the ListProtocols operation to get a list of valid protocols. // Example: `HTTP` Protocol *string `mandatory:"true" json:"protocol"` diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/update_listener_details.go b/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/update_listener_details.go index 5299af7238..87d824005a 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/update_listener_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/loadbalancer/update_listener_details.go @@ -27,9 +27,8 @@ type UpdateListenerDetails struct { // Example: `80` Port *int `mandatory:"true" json:"port"` - // The protocol on which the listener accepts connection requests. - // To get a list of valid protocols, use the ListProtocols - // operation. + // The protocol on which the listener accepts connection requests. The supported protocols are HTTP, HTTP2, TCP, and GRPC. + // You can also use the ListProtocols operation to get a list of valid protocols. // Example: `HTTP` Protocol *string `mandatory:"true" json:"protocol"` diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set.go index f0616ef9a5..bdc8a7ae67 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set.go @@ -27,7 +27,6 @@ type BackendSet struct { // Example: `example_backend_set` Name *string `mandatory:"true" json:"name"` - // The health check policy configuration. HealthChecker *HealthChecker `mandatory:"true" json:"healthChecker"` // The network load balancer policy for the backend set. diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set_details.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set_details.go index 666a7490b9..dac452a982 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set_details.go @@ -20,8 +20,6 @@ import ( // Managing Backend Sets (https://docs.cloud.oracle.com/Content/Balance/Tasks/managingbackendsets.htm). // **Caution:** Oracle recommends that you avoid using any confidential information when you supply string values using the API. type BackendSetDetails struct { - - // The health check policy configuration. HealthChecker *HealthChecker `mandatory:"true" json:"healthChecker"` // The network load balancer policy for the backend set. 
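For context, the oci-go-sdk bump carried by this patch (v65.77.0 to v65.79.0) surfaces two small consumer-visible additions in the hunks above: the IsOverrideExisting flag on containerengine.InstallAddonDetails and the newly registered oc35/oc26 region identifiers. A minimal sketch of how a caller might exercise them, assuming only the standard common.Bool and common.StringToRegion helpers; the values shown are illustrative and not taken from this repository:

    package main

    import (
        "fmt"

        "github.com/oracle/oci-go-sdk/v65/common"
        "github.com/oracle/oci-go-sdk/v65/containerengine"
    )

    func main() {
        // IsOverrideExisting (new in v65.79.0) asks OKE to replace an existing
        // addon installation instead of rejecting the install request.
        details := containerengine.InstallAddonDetails{
            IsOverrideExisting: common.Bool(true),
        }
        fmt.Println(details.String())

        // The regions.go/regions.json additions make the new identifiers
        // resolvable: ap-seoul-2 maps to the new oc35 realm, me-alain-1 to oc26.
        fmt.Println(common.StringToRegion("ap-seoul-2"))
        fmt.Println(common.StringToRegion("me-alain-1"))
    }
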
diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set_summary.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set_summary.go index e807c241f5..cb1886532a 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set_summary.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/backend_set_summary.go @@ -15,7 +15,10 @@ import ( "strings" ) -// BackendSetSummary The representation of BackendSetSummary +// BackendSetSummary The configuration of a network load balancer backend set. +// For more information about backend set configuration, see +// Managing Backend Sets (https://docs.cloud.oracle.com/Content/Balance/Tasks/managingbackendsets.htm). +// **Caution:** Oracle recommends that you avoid using any confidential information when you supply string values using the API. type BackendSetSummary struct { // A user-friendly name for the backend set that must be unique and cannot be changed. @@ -31,7 +34,6 @@ type BackendSetSummary struct { // An array of backends. Backends []Backend `mandatory:"true" json:"backends"` - // The health check policy configuration. HealthChecker *HealthChecker `mandatory:"true" json:"healthChecker"` // If this parameter is enabled, the network load balancer preserves the source IP of the packet forwarded to the backend servers. diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/create_listener_details.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/create_listener_details.go index f7a5faa926..b73698b96c 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/create_listener_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/create_listener_details.go @@ -52,6 +52,10 @@ type CreateListenerDetails struct { // The duration for UDP idle timeout in seconds. // Example: `120` UdpIdleTimeout *int `mandatory:"false" json:"udpIdleTimeout"` + + // The duration for L3IP idle timeout in seconds. + // Example: `200` + L3IpIdleTimeout *int `mandatory:"false" json:"l3IpIdleTimeout"` } func (m CreateListenerDetails) String() string { diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener.go index b2da9e1185..655a86a6ec 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener.go @@ -52,6 +52,10 @@ type Listener struct { // The duration for UDP idle timeout in seconds. // Example: `120` UdpIdleTimeout *int `mandatory:"false" json:"udpIdleTimeout"` + + // The duration for L3IP idle timeout in seconds. + // Example: `200` + L3IpIdleTimeout *int `mandatory:"false" json:"l3IpIdleTimeout"` } func (m Listener) String() string { diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_details.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_details.go index 194c1399f6..3437ab6657 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_details.go @@ -52,6 +52,10 @@ type ListenerDetails struct { // The duration for UDP idle timeout in seconds. // Example: `120` UdpIdleTimeout *int `mandatory:"false" json:"udpIdleTimeout"` + + // The duration for L3IP idle timeout in seconds. 
+ // Example: `200` + L3IpIdleTimeout *int `mandatory:"false" json:"l3IpIdleTimeout"` } func (m ListenerDetails) String() string { diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_protocols.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_protocols.go index 14afdbfea6..f77bb18e51 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_protocols.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_protocols.go @@ -22,6 +22,7 @@ const ( ListenerProtocolsTcp ListenerProtocolsEnum = "TCP" ListenerProtocolsUdp ListenerProtocolsEnum = "UDP" ListenerProtocolsTcpAndUdp ListenerProtocolsEnum = "TCP_AND_UDP" + ListenerProtocolsL3Ip ListenerProtocolsEnum = "L3IP" ) var mappingListenerProtocolsEnum = map[string]ListenerProtocolsEnum{ @@ -29,6 +30,7 @@ var mappingListenerProtocolsEnum = map[string]ListenerProtocolsEnum{ "TCP": ListenerProtocolsTcp, "UDP": ListenerProtocolsUdp, "TCP_AND_UDP": ListenerProtocolsTcpAndUdp, + "L3IP": ListenerProtocolsL3Ip, } var mappingListenerProtocolsEnumLowerCase = map[string]ListenerProtocolsEnum{ @@ -36,6 +38,7 @@ var mappingListenerProtocolsEnumLowerCase = map[string]ListenerProtocolsEnum{ "tcp": ListenerProtocolsTcp, "udp": ListenerProtocolsUdp, "tcp_and_udp": ListenerProtocolsTcpAndUdp, + "l3ip": ListenerProtocolsL3Ip, } // GetListenerProtocolsEnumValues Enumerates the set of values for ListenerProtocolsEnum @@ -54,6 +57,7 @@ func GetListenerProtocolsEnumStringValues() []string { "TCP", "UDP", "TCP_AND_UDP", + "L3IP", } } diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_summary.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_summary.go index a2f8f8e26d..f50b9d26a9 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_summary.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/listener_summary.go @@ -52,6 +52,10 @@ type ListenerSummary struct { // The duration for UDP idle timeout in seconds. // Example: `120` UdpIdleTimeout *int `mandatory:"false" json:"udpIdleTimeout"` + + // The duration for L3IP idle timeout in seconds. 
+ // Example: `200` + L3IpIdleTimeout *int `mandatory:"false" json:"l3IpIdleTimeout"` } func (m ListenerSummary) String() string { diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/network_load_balancers_protocol_summary.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/network_load_balancers_protocol_summary.go index 7d16a10bcc..92dc5380a6 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/network_load_balancers_protocol_summary.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/network_load_balancers_protocol_summary.go @@ -22,6 +22,7 @@ const ( NetworkLoadBalancersProtocolSummaryTcp NetworkLoadBalancersProtocolSummaryEnum = "TCP" NetworkLoadBalancersProtocolSummaryUdp NetworkLoadBalancersProtocolSummaryEnum = "UDP" NetworkLoadBalancersProtocolSummaryTcpAndUdp NetworkLoadBalancersProtocolSummaryEnum = "TCP_AND_UDP" + NetworkLoadBalancersProtocolSummaryL3Ip NetworkLoadBalancersProtocolSummaryEnum = "L3IP" ) var mappingNetworkLoadBalancersProtocolSummaryEnum = map[string]NetworkLoadBalancersProtocolSummaryEnum{ @@ -29,6 +30,7 @@ var mappingNetworkLoadBalancersProtocolSummaryEnum = map[string]NetworkLoadBalan "TCP": NetworkLoadBalancersProtocolSummaryTcp, "UDP": NetworkLoadBalancersProtocolSummaryUdp, "TCP_AND_UDP": NetworkLoadBalancersProtocolSummaryTcpAndUdp, + "L3IP": NetworkLoadBalancersProtocolSummaryL3Ip, } var mappingNetworkLoadBalancersProtocolSummaryEnumLowerCase = map[string]NetworkLoadBalancersProtocolSummaryEnum{ @@ -36,6 +38,7 @@ var mappingNetworkLoadBalancersProtocolSummaryEnumLowerCase = map[string]Network "tcp": NetworkLoadBalancersProtocolSummaryTcp, "udp": NetworkLoadBalancersProtocolSummaryUdp, "tcp_and_udp": NetworkLoadBalancersProtocolSummaryTcpAndUdp, + "l3ip": NetworkLoadBalancersProtocolSummaryL3Ip, } // GetNetworkLoadBalancersProtocolSummaryEnumValues Enumerates the set of values for NetworkLoadBalancersProtocolSummaryEnum @@ -54,6 +57,7 @@ func GetNetworkLoadBalancersProtocolSummaryEnumStringValues() []string { "TCP", "UDP", "TCP_AND_UDP", + "L3IP", } } diff --git a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/update_listener_details.go b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/update_listener_details.go index e0f1ff5a3b..fdc59ed7db 100644 --- a/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/update_listener_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/v65/networkloadbalancer/update_listener_details.go @@ -48,6 +48,10 @@ type UpdateListenerDetails struct { // The duration for UDP idle timeout in seconds. // Example: `120` UdpIdleTimeout *int `mandatory:"false" json:"udpIdleTimeout"` + + // The duration for L3IP idle timeout in seconds. + // Example: `200` + L3IpIdleTimeout *int `mandatory:"false" json:"l3IpIdleTimeout"` } func (m UpdateListenerDetails) String() string { diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go new file mode 100644 index 0000000000..acc9a670b2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -0,0 +1,264 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package noop provides an implementation of the OpenTelemetry metric API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry metric API will +// effectively disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry metric API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/metric/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. + + _ metric.MeterProvider = MeterProvider{} + _ metric.Meter = Meter{} + _ metric.Observer = Observer{} + _ metric.Registration = Registration{} + _ metric.Int64Counter = Int64Counter{} + _ metric.Float64Counter = Float64Counter{} + _ metric.Int64UpDownCounter = Int64UpDownCounter{} + _ metric.Float64UpDownCounter = Float64UpDownCounter{} + _ metric.Int64Histogram = Int64Histogram{} + _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64ObservableCounter = Int64ObservableCounter{} + _ metric.Float64ObservableCounter = Float64ObservableCounter{} + _ metric.Int64ObservableGauge = Int64ObservableGauge{} + _ metric.Float64ObservableGauge = Float64ObservableGauge{} + _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{} + _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{} + _ metric.Int64Observer = Int64Observer{} + _ metric.Float64Observer = Float64Observer{} +) + +// MeterProvider is an OpenTelemetry No-Op MeterProvider. +type MeterProvider struct{ embedded.MeterProvider } + +// NewMeterProvider returns a MeterProvider that does not record any telemetry. +func NewMeterProvider() MeterProvider { + return MeterProvider{} +} + +// Meter returns an OpenTelemetry Meter that does not record any telemetry. +func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { + return Meter{} +} + +// Meter is an OpenTelemetry No-Op Meter. +type Meter struct{ embedded.Meter } + +// Int64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { + return Int64Counter{}, nil +} + +// Int64UpDownCounter returns an UpDownCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + return Int64UpDownCounter{}, nil +} + +// Int64Histogram returns a Histogram used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + return Int64Histogram{}, nil +} + +// Int64ObservableCounter returns an ObservableCounter used to record int64 +// measurements that produces no telemetry. 
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + return Int64ObservableCounter{}, nil +} + +// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record int64 measurements that produces no telemetry. +func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + return Int64ObservableUpDownCounter{}, nil +} + +// Int64ObservableGauge returns an ObservableGauge used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + return Int64ObservableGauge{}, nil +} + +// Float64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. +func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) { + return Float64Counter{}, nil +} + +// Float64UpDownCounter returns an UpDownCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + return Float64UpDownCounter{}, nil +} + +// Float64Histogram returns a Histogram used to record int64 measurements that +// produces no telemetry. +func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + return Float64Histogram{}, nil +} + +// Float64ObservableCounter returns an ObservableCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + return Float64ObservableCounter{}, nil +} + +// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record int64 measurements that produces no telemetry. +func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + return Float64ObservableUpDownCounter{}, nil +} + +// Float64ObservableGauge returns an ObservableGauge used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + return Float64ObservableGauge{}, nil +} + +// RegisterCallback performs no operation. +func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) { + return Registration{}, nil +} + +// Observer acts as a recorder of measurements for multiple instruments in a +// Callback, it performing no operation. +type Observer struct{ embedded.Observer } + +// ObserveFloat64 performs no operation. +func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) { +} + +// ObserveInt64 performs no operation. +func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) { +} + +// Registration is the registration of a Callback with a No-Op Meter. +type Registration struct{ embedded.Registration } + +// Unregister unregisters the Callback the Registration represents with the +// No-Op Meter. This will always return nil because the No-Op Meter performs no +// operation, including hold any record of registrations. 
+func (Registration) Unregister() error { return nil } + +// Int64Counter is an OpenTelemetry Counter used to record int64 measurements. +// It produces no telemetry. +type Int64Counter struct{ embedded.Int64Counter } + +// Add performs no operation. +func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64Counter is an OpenTelemetry Counter used to record float64 +// measurements. It produces no telemetry. +type Float64Counter struct{ embedded.Float64Counter } + +// Add performs no operation. +func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64 +// measurements. It produces no telemetry. +type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } + +// Add performs no operation. +func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record +// float64 measurements. It produces no telemetry. +type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } + +// Add performs no operation. +func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64Histogram is an OpenTelemetry Histogram used to record int64 +// measurements. It produces no telemetry. +type Int64Histogram struct{ embedded.Int64Histogram } + +// Record performs no operation. +func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Histogram is an OpenTelemetry Histogram used to record float64 +// measurements. It produces no telemetry. +type Float64Histogram struct{ embedded.Float64Histogram } + +// Record performs no operation. +func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableCounter struct { + metric.Int64Observable + embedded.Int64ObservableCounter +} + +// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableCounter struct { + metric.Float64Observable + embedded.Float64ObservableCounter +} + +// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableGauge struct { + metric.Int64Observable + embedded.Int64ObservableGauge +} + +// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableGauge struct { + metric.Float64Observable + embedded.Float64ObservableGauge +} + +// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record int64 measurements. It produces no telemetry. +type Int64ObservableUpDownCounter struct { + metric.Int64Observable + embedded.Int64ObservableUpDownCounter +} + +// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record float64 measurements. It produces no telemetry. +type Float64ObservableUpDownCounter struct { + metric.Float64Observable + embedded.Float64ObservableUpDownCounter +} + +// Int64Observer is a recorder of int64 measurements that performs no operation. +type Int64Observer struct{ embedded.Int64Observer } + +// Observe performs no operation. 
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} + +// Float64Observer is a recorder of float64 measurements that performs no +// operation. +type Float64Observer struct{ embedded.Float64Observer } + +// Observe performs no operation. +func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index e2b298d859..43557ab7e9 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1564,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984fd9..3b9f06b962 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. 
+ if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c6408d..ce2e8b40ee 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -924,7 +925,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } @@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 0000000000..61075bd16d --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. 
+// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. + // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. + newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. + blocked := h.blocked[:0] + unblocked := false + for _, b := range h.blocked { + if !unblocked && b.f() { + unblocked = true + close(b.ch) + } else { + blocked = append(blocked, b) + } + } + h.blocked = blocked + + // Count goroutines blocked on condition variables. + condwait := 0 + for _, count := range h.condwait { + condwait += count + } + + if h.total > condwait+len(blocked) { + h.active <- struct{}{} + return true + } else { + h.inactive <- struct{}{} + return false + } +} + +// goRun starts a new goroutine. +func (h *testSyncHooks) goRun(f func()) { + h.lock() + h.total++ + h.unlock() + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + f() + }() +} + +// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. +// It waits until f returns true before proceeding. +// +// Example usage: +// +// h.blockUntil(func() bool { +// // Is the context done yet? +// select { +// case <-ctx.Done(): +// default: +// return false +// } +// return true +// }) +// // Wait for the context to become done. +// <-ctx.Done() +// +// The function f passed to blockUntil must be non-blocking and idempotent. +func (h *testSyncHooks) blockUntil(f func() bool) { + if f() { + return + } + ch := make(chan struct{}) + h.lock() + h.blocked = append(h.blocked, &testBlockedGoroutine{ + f: f, + ch: ch, + }) + h.unlock() + <-ch +} + +// broadcast is sync.Cond.Broadcast. 
+func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { + h.lock() + delete(h.condwait, cond) + h.unlock() + cond.Broadcast() +} + +// broadcast is sync.Cond.Wait. +func (h *testSyncHooks) condWait(cond *sync.Cond) { + h.lock() + h.condwait[cond]++ + h.unlock() +} + +// newTimer creates a new fake timer. +func (h *testSyncHooks) newTimer(d time.Duration) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + c: make(chan time.Time), + } + h.timers = append(h.timers, t) + return t +} + +// afterFunc creates a new fake AfterFunc timer. +func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + f: f, + } + h.timers = append(h.timers, t) + return t +} + +func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + t := h.afterFunc(d, cancel) + return ctx, func() { + t.Stop() + cancel() + } +} + +func (h *testSyncHooks) timeUntilEvent() time.Duration { + h.lock() + defer h.unlock() + var next time.Time + for _, t := range h.timers { + if next.IsZero() || t.when.Before(next) { + next = t.when + } + } + if d := next.Sub(h.now); d > 0 { + return d + } + return 0 +} + +// advance advances time and causes synthetic timers to fire. +func (h *testSyncHooks) advance(d time.Duration) { + h.lock() + defer h.unlock() + h.now = h.now.Add(d) + timers := h.timers[:0] + for _, t := range h.timers { + t := t // remove after go.mod depends on go1.22 + t.mu.Lock() + switch { + case t.when.After(h.now): + timers = append(timers, t) + case t.when.IsZero(): + // stopped timer + default: + t.when = time.Time{} + if t.c != nil { + close(t.c) + } + if t.f != nil { + h.total++ + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + t.f() + }() + } + } + t.mu.Unlock() + } + h.timers = timers +} + +// A timer wraps a time.Timer, or a synthetic equivalent in tests. +// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. +type timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// timeTimer implements timer using real time. +type timeTimer struct { + t *time.Timer + c chan time.Time +} + +// newTimeTimer creates a new timer using real time. +func newTimeTimer(d time.Duration) timer { + ch := make(chan time.Time) + t := time.AfterFunc(d, func() { + close(ch) + }) + return &timeTimer{t, ch} +} + +// newTimeAfterFunc creates an AfterFunc timer using real time. +func newTimeAfterFunc(d time.Duration, f func()) timer { + return &timeTimer{ + t: time.AfterFunc(d, f), + } +} + +func (t timeTimer) C() <-chan time.Time { return t.c } +func (t timeTimer) Stop() bool { return t.t.Stop() } +func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } + +// fakeTimer implements timer using fake time. 
+type fakeTimer struct { + hooks *testSyncHooks + + mu sync.Mutex + when time.Time // when the timer will fire + c chan time.Time // closed when the timer fires; mutually exclusive with f + f func() // called when the timer fires; mutually exclusive with c +} + +func (t *fakeTimer) C() <-chan time.Time { return t.c } + +func (t *fakeTimer) Stop() bool { + t.mu.Lock() + defer t.mu.Unlock() + stopped := t.when.IsZero() + t.when = time.Time{} + return stopped +} + +func (t *fakeTimer) Reset(d time.Duration) bool { + if t.c != nil || t.f == nil { + panic("fakeTimer only supports Reset on AfterFunc timers") + } + t.mu.Lock() + defer t.mu.Unlock() + t.hooks.lock() + defer t.hooks.unlock() + active := !t.when.IsZero() + t.when = t.hooks.now.Add(d) + if !active { + t.hooks.timers = append(t.hooks.timers, t) + } + return active +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86c6..ce375c8c75 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -178,6 +184,8 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +310,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -344,6 +352,60 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + + syncHooks *testSyncHooks // can be nil +} + +// Hook points used for testing. +// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +// goRun starts a new goroutine. +func (cc *ClientConn) goRun(f func()) { + if cc.syncHooks != nil { + cc.syncHooks.goRun(f) + return + } + go f() +} + +// condBroadcast is cc.cond.Broadcast. +func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
+func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks 
*testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1458,21 @@ func (cs *clientStream) 
writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } } + return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. 
- return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2911,9 +3065,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2955,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return 
t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go index 69a4ac7eef..1e64157f3e 100644 --- a/vendor/golang.org/x/net/websocket/client.go +++ b/vendor/golang.org/x/net/websocket/client.go @@ -6,10 +6,12 @@ package websocket import ( "bufio" + "context" "io" "net" "net/http" "net/url" + "time" ) // DialError is an error that occurs while dialling a websocket server. @@ -79,28 +81,59 @@ func parseAuthority(location *url.URL) string { // DialConfig opens a new client connection to a WebSocket with a config. func DialConfig(config *Config) (ws *Conn, err error) { - var client net.Conn + return config.DialContext(context.Background()) +} + +// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation. +func (config *Config) DialContext(ctx context.Context) (*Conn, error) { if config.Location == nil { return nil, &DialError{config, ErrBadWebSocketLocation} } if config.Origin == nil { return nil, &DialError{config, ErrBadWebSocketOrigin} } + dialer := config.Dialer if dialer == nil { dialer = &net.Dialer{} } - client, err = dialWithDialer(dialer, config) - if err != nil { - goto Error - } - ws, err = NewClient(config, client) + + client, err := dialWithDialer(ctx, dialer, config) if err != nil { - client.Close() - goto Error + return nil, &DialError{config, err} } - return -Error: - return nil, &DialError{config, err} + // Cleanup the connection if we fail to create the websocket successfully + success := false + defer func() { + if !success { + _ = client.Close() + } + }() + + var ws *Conn + var wsErr error + doneConnecting := make(chan struct{}) + go func() { + defer close(doneConnecting) + ws, err = NewClient(config, client) + if err != nil { + wsErr = &DialError{config, err} + } + }() + + // The websocket.NewClient() function can block indefinitely, make sure that we + // respect the deadlines specified by the context. 
+ select { + case <-ctx.Done(): + // Force the pending operations to fail, terminating the pending connection attempt + _ = client.SetDeadline(time.Now()) + <-doneConnecting // Wait for the goroutine that tries to establish the connection to finish + return nil, &DialError{config, ctx.Err()} + case <-doneConnecting: + if wsErr == nil { + success = true // Disarm the deferred connection cleanup + } + return ws, wsErr + } } diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go index 2dab943a48..8a2d83c473 100644 --- a/vendor/golang.org/x/net/websocket/dial.go +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -5,18 +5,23 @@ package websocket import ( + "context" "crypto/tls" "net" ) -func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { +func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) { switch config.Location.Scheme { case "ws": - conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) case "wss": - conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + tlsDialer := &tls.Dialer{ + NetDialer: dialer, + Config: config.TlsConfig, + } + conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) default: err = ErrBadScheme } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index bd6b17e158..78d918bc72 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -142,7 +142,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -404,7 +404,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. - Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 12c680dc97..27085e056a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -158,7 +158,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. 
@@ -419,7 +419,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. - Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 53fdf3c8d0..65e1d3c592 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -171,7 +171,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // When specified, it should match one the container or initContainer // names in the pod template. // +optional - ContainerName *string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` + ContainerName *string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are @@ -231,14 +231,14 @@ type PodFailurePolicyRule struct { // Represents the requirement on the container exit codes. // +optional - OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes" protobuf:"bytes,2,opt,name=onExitCodes"` + OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes,omitempty" protobuf:"bytes,2,opt,name=onExitCodes"` // Represents the requirement on the pod conditions. The requirement is represented // as a list of pod condition patterns. The requirement is satisfied if at // least one pattern matches an actual pod condition. At most 20 elements are allowed. // +listType=atomic // +optional - OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions" protobuf:"bytes,3,opt,name=onPodConditions"` + OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions,omitempty" protobuf:"bytes,3,opt,name=onPodConditions"` } // PodFailurePolicy describes how failed pods influence the backoffLimit. diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index cf9b6e6ebc..d099238cdf 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -3286,7 +3286,7 @@ message PersistentVolumeStatus { // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 1aade3806f..61ba21bcad 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -423,7 +423,7 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. 
- // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 01152a0964..fd6f7dc61b 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1478,7 +1478,7 @@ var map_PersistentVolumeStatus = map[string]string{ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", "message": "message is a human-readable message indicating details about why the volume is in this state.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go index 7cfdd06321..8a741936a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go @@ -344,7 +344,7 @@ func (conn *Conn) handle(ws *websocket.Conn) { continue } if _, err := conn.channels[channel].DataFromSocket(data); err != nil { - klog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) + klog.Errorf("Unable to write frame (%d bytes) to %d: %v", len(data), channel, err) continue } } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go index b2624694c8..9cd3c01aed 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go @@ -180,8 +180,9 @@ func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string, celmetrics.Metrics.ObserveDefinition(context.TODO(), "active", "deny") } - // Skip reconcile if the spec of the definition is unchanged - if info.lastReconciledValue != nil && definition != nil && + // Skip reconcile if the spec of the definition is unchanged and had a + // successful previous sync + if info.configurationError == nil && info.lastReconciledValue != nil && definition != nil && apiequality.Semantic.DeepEqual(info.lastReconciledValue.Spec, definition.Spec) { return nil } 
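The documentation updates above track PersistentVolumeLastPhaseTransitionTime graduating to beta. A small sketch (not part of this patch) of reading the new status field with client-go, assuming an already-configured clientset; the helper and PV names are hypothetical, and the assumed imports are context, fmt, metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", and "k8s.io/client-go/kubernetes":

// printLastTransition reports when a PersistentVolume last changed phase.
func printLastTransition(ctx context.Context, client kubernetes.Interface, pvName string) error {
	pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// LastPhaseTransitionTime resets on every phase change; it may be nil when
	// the (now beta, default-on) feature gate is disabled or the volume has not
	// transitioned yet.
	if t := pv.Status.LastPhaseTransitionTime; t != nil {
		fmt.Printf("volume %s entered phase %s at %s\n", pv.Name, pv.Status.Phase, t.Time)
	}
	return nil
}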
diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index f059cef9bc..e524e0c647 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -163,6 +163,13 @@ const ( // Deprecates and removes SelfLink from ObjectMeta and ListMeta. RemoveSelfLink featuregate.Feature = "RemoveSelfLink" + // owner: @serathius + // beta: v1.30 + // + // Allow watch cache to create a watch on a dedicated RPC. + // This prevents watch cache from being starved by other watches. + SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC" + // owner: @apelisse, @lavalamp // alpha: v1.14 // beta: v1.16 @@ -233,6 +240,12 @@ const ( // Enables support for watch bookmark events. WatchBookmark featuregate.Feature = "WatchBookmark" + // owner: @serathius + // beta: 1.30 + // Enables watches without resourceVersion to be served from storage. + // Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue. + WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion" + // owner: @vinaykul // kep: http://kep.k8s.io/1287 // alpha: v1.27 @@ -303,6 +316,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 @@ -319,6 +334,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, + InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, WatchList: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index beff08f14d..2142c14e61 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -1142,7 +1142,7 @@ func AuthorizeClientBearerToken(loopback *restclient.Config, authn *Authenticati tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: user.APIServerUser, UID: uid, - Groups: []string{user.SystemPrivilegedGroup}, + Groups: []string{user.AllAuthenticated, user.SystemPrivilegedGroup}, } tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens, authn.APIAudiences) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/tracing.go b/vendor/k8s.io/apiserver/pkg/server/options/tracing.go index d56e7df511..7be62fad04 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/tracing.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/tracing.go @@ -23,7 +23,9 @@ import ( "net" "github.com/spf13/pflag" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/semconv/v1.12.0" "google.golang.org/grpc" @@ -48,6 +50,12 @@ var ( codecs = serializer.NewCodecFactory(cfgScheme) ) +func init() { + // Prevent memory leak from OTel metrics, which we don't use: + // 
https://github.com/open-telemetry/opentelemetry-go-contrib/issues/5190 + otel.SetMeterProvider(noop.NewMeterProvider()) +} + func init() { install.Install(cfgScheme) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 4f40804419..1bfc08d582 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -25,6 +25,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "google.golang.org/grpc/metadata" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -50,7 +51,8 @@ import ( ) var ( - emptyFunc = func(bool) {} + emptyFunc = func(bool) {} + coreNamespaceResource = schema.GroupResource{Group: "", Resource: "namespaces"} ) const ( @@ -397,10 +399,18 @@ func NewCacherFromConfig(config Config) (*Cacher, error) { // so that future reuse does not get a spurious timeout. <-cacher.timer.C } - progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock) + var contextMetadata metadata.MD + if utilfeature.DefaultFeatureGate.Enabled(features.SeparateCacheWatchRPC) { + // Add grpc context metadata to watch and progress notify requests done by cacher to: + // * Prevent starvation of watch opened by cacher, by moving it to separate Watch RPC than watch request that bypass cacher. + // * Ensure that progress notification requests are executed on the same Watch RPC as their watch, which is required for it to work. + contextMetadata = metadata.New(map[string]string{"source": "cache"}) + } + + progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock, contextMetadata) watchCache := newWatchCache( config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource, progressRequester) - listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc, contextMetadata) reflectorName := "storage/cacher.go:" + config.ResourcePrefix reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) @@ -513,7 +523,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) && opts.SendInitialEvents != nil { opts.SendInitialEvents = nil } - if opts.SendInitialEvents == nil && opts.ResourceVersion == "" { + if utilfeature.DefaultFeatureGate.Enabled(features.WatchFromStorageWithoutResourceVersion) && opts.SendInitialEvents == nil && opts.ResourceVersion == "" { return c.storage.Watch(ctx, key, opts) } requestedWatchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) @@ -539,6 +549,12 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions scope.name = selectorName } + // for request like '/api/v1/watch/namespaces/*', set scope.namespace to empty. + // namespaces don't populate metadata.namespace in ObjFields. 
+ if c.groupResource == coreNamespaceResource && len(scope.namespace) > 0 && scope.namespace == scope.name { + scope.namespace = "" + } + triggerValue, triggerSupported := "", false if c.indexedTrigger != nil { for _, field := range pred.IndexFields { diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go index 1252e5e349..2817a93dd0 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go @@ -19,6 +19,8 @@ package cacher import ( "context" + "google.golang.org/grpc/metadata" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -30,17 +32,19 @@ import ( // listerWatcher opaques storage.Interface to expose cache.ListerWatcher. type listerWatcher struct { - storage storage.Interface - resourcePrefix string - newListFunc func() runtime.Object + storage storage.Interface + resourcePrefix string + newListFunc func() runtime.Object + contextMetadata metadata.MD } // NewListerWatcher returns a storage.Interface backed ListerWatcher. -func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { +func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object, contextMetadata metadata.MD) cache.ListerWatcher { return &listerWatcher{ - storage: storage, - resourcePrefix: resourcePrefix, - newListFunc: newListFunc, + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + contextMetadata: contextMetadata, } } @@ -59,7 +63,11 @@ func (lw *listerWatcher) List(options metav1.ListOptions) (runtime.Object, error Predicate: pred, Recursive: true, } - if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil { + ctx := context.Background() + if lw.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata) + } + if err := lw.storage.GetList(ctx, lw.resourcePrefix, storageOpts, list); err != nil { return nil, err } return list, nil @@ -73,5 +81,9 @@ func (lw *listerWatcher) Watch(options metav1.ListOptions) (watch.Interface, err Recursive: true, ProgressNotify: true, } - return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts) + ctx := context.Background() + if lw.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata) + } + return lw.storage.Watch(ctx, lw.resourcePrefix, opts) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index c26eb55dac..c27ca053b7 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -492,8 +492,7 @@ func (s sortableStoreElements) Swap(i, j int) { // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along // with their ResourceVersion and the name of the index, if any, that was used. 
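As the hunks above show, the cacher now tags its LIST/WATCH contexts with gRPC metadata ("source": "cache") so that, when SeparateCacheWatchRPC is enabled, its traffic rides a Watch RPC separate from requests that bypass the cache. A minimal sketch of the tagging pattern (illustrative; the function name is hypothetical):

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// withCacheSource marks a request context as originating from the watch cache.
func withCacheSource(ctx context.Context) context.Context {
	md := metadata.New(map[string]string{"source": "cache"})
	return metadata.NewOutgoingContext(ctx, md)
}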
-func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) { - var err error +func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) { if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && w.notFresh(resourceVersion) { w.waitingUntilFresh.Add() err = w.waitUntilFreshAndBlock(ctx, resourceVersion) @@ -501,12 +500,14 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion } else { err = w.waitUntilFreshAndBlock(ctx, resourceVersion) } + + defer func() { sort.Sort(sortableStoreElements(result)) }() defer w.RUnlock() if err != nil { - return nil, 0, "", err + return result, rv, index, err } - result, rv, index, err := func() ([]interface{}, uint64, string, error) { + result, rv, index, err = func() ([]interface{}, uint64, string, error) { // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we // want - they will be filtered out later. The fact that we return less things is only further performance improvement. @@ -519,7 +520,6 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion return w.store.List(), w.resourceVersion, "", nil }() - sort.Sort(sortableStoreElements(result)) return result, rv, index, err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go index c455357e04..2b57dd1650 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go @@ -18,6 +18,7 @@ package cacher import ( "fmt" + "sort" "sync" "k8s.io/apimachinery/pkg/fields" @@ -114,9 +115,24 @@ func newCacheInterval(startIndex, endIndex int, indexer indexerFunc, indexValida } } +type sortableWatchCacheEvents []*watchCacheEvent + +func (s sortableWatchCacheEvents) Len() int { + return len(s) +} + +func (s sortableWatchCacheEvents) Less(i, j int) bool { + return s[i].Key < s[j].Key +} + +func (s sortableWatchCacheEvents) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + // newCacheIntervalFromStore is meant to handle the case of rv=0, such that the events // returned by Next() need to be events from a List() done on the underlying store of // the watch cache. +// The items returned in the interval will be sorted by Key. func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc) (*watchCacheInterval, error) { buffer := &watchCacheIntervalBuffer{} allItems := store.List() @@ -140,6 +156,7 @@ func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getA } buffer.endIndex++ } + sort.Sort(sortableWatchCacheEvents(buffer.buffer)) ci := &watchCacheInterval{ startIndex: 0, // Simulate that we already have all the events we're looking for. 
diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go index f44ca9325b..13f50bc187 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go @@ -21,6 +21,8 @@ import ( "sync" "time" + "google.golang.org/grpc/metadata" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -34,10 +36,11 @@ const ( progressRequestPeriod = 100 * time.Millisecond ) -func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory) *conditionalProgressRequester { +func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory, contextMetadata metadata.MD) *conditionalProgressRequester { pr := &conditionalProgressRequester{ clock: clock, requestWatchProgress: requestWatchProgress, + contextMetadata: contextMetadata, } pr.cond = sync.NewCond(pr.mux.RLocker()) return pr @@ -54,6 +57,7 @@ type TickerFactory interface { type conditionalProgressRequester struct { clock TickerFactory requestWatchProgress WatchProgressRequester + contextMetadata metadata.MD mux sync.RWMutex cond *sync.Cond @@ -63,6 +67,9 @@ type conditionalProgressRequester struct { func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) { ctx := wait.ContextForChannel(stopCh) + if pr.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, pr.contextMetadata) + } go func() { defer utilruntime.HandleCrash() <-stopCh diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index fadc87d53d..1c0307bd91 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -84,7 +84,7 @@ var ( }, []string{"endpoint"}, ) - storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"cluster"}, nil, compbasemetrics.ALPHA, "") + storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"storage_cluster_id"}, nil, compbasemetrics.ALPHA, "") storageMonitor = &monitorCollector{monitorGetter: func() ([]Monitor, error) { return nil, nil }} etcdEventsReceivedCounts = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ @@ -287,21 +287,21 @@ func (c *monitorCollector) CollectWithStability(ch chan<- compbasemetrics.Metric } for i, m := range monitors { - cluster := fmt.Sprintf("etcd-%d", i) + storageClusterID := fmt.Sprintf("etcd-%d", i) - klog.V(4).InfoS("Start collecting storage metrics", "cluster", cluster) + klog.V(4).InfoS("Start collecting storage metrics", "storage_cluster_id", storageClusterID) ctx, cancel := context.WithTimeout(context.Background(), time.Second) metrics, err := m.Monitor(ctx) cancel() m.Close() if err != nil { - klog.InfoS("Failed to get storage metrics", "cluster", cluster, "err", err) + klog.InfoS("Failed to get storage metrics", "storage_cluster_id", storageClusterID, "err", err) continue } - metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), cluster) + metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), storageClusterID) if err != nil { - 
klog.ErrorS(err, "Failed to create metric", "cluster", cluster) + klog.ErrorS(err, "Failed to create metric", "storage_cluster_id", storageClusterID) } ch <- metric } diff --git a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go index a60986decc..49ef4717cd 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go @@ -187,6 +187,9 @@ type wsStreamCreator struct { // map of stream id to stream; multiple streams read/write the connection streams map[byte]*stream streamsMu sync.Mutex + // setStreamErr holds the error to return to anyone calling setStreams. + // this is populated in closeAllStreamReaders + setStreamErr error } func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator { @@ -202,10 +205,14 @@ func (c *wsStreamCreator) getStream(id byte) *stream { return c.streams[id] } -func (c *wsStreamCreator) setStream(id byte, s *stream) { +func (c *wsStreamCreator) setStream(id byte, s *stream) error { c.streamsMu.Lock() defer c.streamsMu.Unlock() + if c.setStreamErr != nil { + return c.setStreamErr + } c.streams[id] = s + return nil } // CreateStream uses id from passed headers to create a stream over "c.conn" connection. @@ -228,7 +235,11 @@ func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, connWriteLock: &c.connWriteLock, id: id, } - c.setStream(id, s) + if err := c.setStream(id, s); err != nil { + _ = s.writePipe.Close() + _ = s.readPipe.Close() + return nil, err + } return s, nil } @@ -312,7 +323,7 @@ func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, de } // closeAllStreamReaders closes readers in all streams. -// This unblocks all stream.Read() calls. +// This unblocks all stream.Read() calls, and keeps any future streams from being created. func (c *wsStreamCreator) closeAllStreamReaders(err error) { c.streamsMu.Lock() defer c.streamsMu.Unlock() @@ -320,6 +331,12 @@ func (c *wsStreamCreator) closeAllStreamReaders(err error) { // Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes. _ = s.writePipe.CloseWithError(err) } + // ensure callers to setStreams receive an error after this point + if err != nil { + c.setStreamErr = err + } else { + c.setStreamErr = fmt.Errorf("closed all streams") + } } type stream struct { diff --git a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go index 3d464d12d7..39cd2ba288 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go @@ -57,6 +57,7 @@ var ( func Register(registry k8smetrics.KubeRegistry) { registry.Register(healthcheck) registry.Register(healthchecksTotal) + _ = k8smetrics.RegisterProcessStartTime(registry.Register) } func ResetHealthMetrics() { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index fa1242b8a1..6a3f888ab3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -392,7 +392,7 @@ type PersistentVolumeStatus struct { Reason string // LastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. 
+ // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index a6f7fef301..1885531f87 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -5141,6 +5141,46 @@ func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerS return allErrs } +// ValidateInitContainerStateTransition test to if any illegal init container state transitions are being attempted +func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, podSpec *core.PodSpec) field.ErrorList { + allErrs := field.ErrorList{} + // If we should always restart, containers are allowed to leave the terminated state + if podSpec.RestartPolicy == core.RestartPolicyAlways { + return allErrs + } + for i, oldStatus := range oldStatuses { + // Skip any container that is not terminated + if oldStatus.State.Terminated == nil { + continue + } + // Skip any container that failed but is allowed to restart + if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == core.RestartPolicyOnFailure { + continue + } + + // Skip any restartable init container that is allowed to restart + isRestartableInitContainer := false + for _, c := range podSpec.InitContainers { + if oldStatus.Name == c.Name { + if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways { + isRestartableInitContainer = true + } + break + } + } + if isRestartableInitContainer { + continue + } + + for _, newStatus := range newStatuses { + if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { + allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state")) + } + } + } + return allErrs +} + // ValidatePodStatusUpdate checks for changes to status that shouldn't occur in normal operation. func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList { fldPath := field.NewPath("metadata") @@ -5162,7 +5202,7 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions // If pod should not restart, make sure the status update does not transition // any terminated containers to a non-terminated state. allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...) - allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...) + allErrs = append(allErrs, ValidateInitContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), &oldPod.Spec)...) // The kubelet will never restart ephemeral containers, so treat them like they have an implicit RestartPolicyNever. allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), core.RestartPolicyNever)...) 
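ValidateInitContainerStateTransition above exempts restartable init containers, since a sidecar is expected to leave the terminated state even when the pod itself never restarts. A sketch (not part of this patch) of a pod spec that marks an init container as restartable, using hypothetical names and images; core is assumed to be "k8s.io/kubernetes/pkg/apis/core":

restartAlways := core.ContainerRestartPolicyAlways
podSpec := core.PodSpec{
	RestartPolicy: core.RestartPolicyNever, // the pod as a whole never restarts
	InitContainers: []core.Container{{
		Name:  "log-shipper",           // hypothetical sidecar
		Image: "example.com/logger:v1", // hypothetical image
		// A RestartPolicy of Always on an init container marks it as a
		// restartable (sidecar) container, so the validation above allows
		// it to move out of the terminated state.
		RestartPolicy: &restartAlways,
	}},
	Containers: []core.Container{{Name: "main", Image: "example.com/app:v1"}},
}
_ = podSpec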
allErrs = append(allErrs, validatePodResourceClaimStatuses(newPod.Status.ResourceClaimStatuses, newPod.Spec.ResourceClaims, fldPath.Child("resourceClaimStatuses"))...) diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 48daa70635..b0b12d6ebf 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -616,6 +616,7 @@ const ( // owner: @RomanBednar // kep: https://kep.k8s.io/3762 // alpha: v1.28 + // beta: v1.29 // // Adds a new field to persistent volumes which holds a timestamp of when the volume last transitioned its phase. PersistentVolumeLastPhaseTransitionTime featuregate.Feature = "PersistentVolumeLastPhaseTransitionTime" @@ -1262,6 +1263,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 @@ -1272,6 +1275,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.ZeroLimitedNominalConcurrencyShares: {Default: false, PreRelease: featuregate.Beta}, + genericfeatures.WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, + // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go index d8f0f7d0e2..7d0610822d 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go @@ -49,7 +49,8 @@ type NodeScore struct { Score int64 } -// NodeToStatusMap declares map from node name to its status. +// NodeToStatusMap contains the statuses of the Nodes where the incoming Pod was not schedulable. +// A PostFilter plugin that uses this map should interpret absent Nodes as UnschedulableAndUnresolvable. type NodeToStatusMap map[string]*Status // NodePluginScores is a struct with node name and scores for that node. @@ -435,6 +436,8 @@ type FilterPlugin interface { type PostFilterPlugin interface { Plugin // PostFilter is called by the scheduling framework. + // If there is no entry in the NodeToStatus map, its implicit status is UnschedulableAndUnresolvable. + // // A PostFilter plugin should return one of the following statuses: // - Unschedulable: the plugin gets executed successfully but the pod cannot be made schedulable. // - Success: the plugin gets executed successfully and the pod can be made schedulable. diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go index 696ad9b41a..a6b0d08bcd 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go @@ -279,8 +279,16 @@ type WeightedAffinityTerm struct { Weight int32 } +// ExtenderName is a fake plugin name put in UnschedulablePlugins when Extender rejected some Nodes. 
+const ExtenderName = "Extender" + // Diagnosis records the details to diagnose a scheduling failure. type Diagnosis struct { + // NodeToStatusMap records the status of each retriable node (status Unschedulable) + // if they're rejected in PreFilter (via PreFilterResult) or Filter plugins. + // Nodes that pass PreFilter/Filter plugins are not included in this map. + // While this map may contain UnschedulableAndUnresolvable statuses, the absence of + // a node should be interpreted as UnschedulableAndUnresolvable. NodeToStatusMap NodeToStatusMap // UnschedulablePlugins are plugins that returns Unschedulable or UnschedulableAndUnresolvable. UnschedulablePlugins sets.Set[string] diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go index d4871e70d7..220acad24e 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go @@ -124,7 +124,7 @@ var ( // Start with 10ms with the last bucket being [~88m, Inf). Buckets: metrics.ExponentialBuckets(0.01, 2, 20), StabilityLevel: metrics.STABLE, - DeprecatedVersion: "1.28.0", + DeprecatedVersion: "1.29.0", }, []string{"attempts"}) diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go index 39673a9589..ef99bd3bc4 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go @@ -72,9 +72,8 @@ func (fs *DefaultFs) Rename(oldpath, newpath string) error { return os.Rename(oldpath, newpath) } -// MkdirAll via os.MkdirAll func (fs *DefaultFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(fs.prefix(path), perm) + return MkdirAll(fs.prefix(path), perm) } // MkdirAllWithPathCheck checks if path exists already. If not, it creates a directory @@ -97,7 +96,7 @@ func MkdirAllWithPathCheck(path string, perm os.FileMode) error { return fmt.Errorf("path %v exists but is not a directory", path) } // If existence of path not known, attempt to create it. - if err := os.MkdirAll(path, perm); err != nil { + if err := MkdirAll(path, perm); err != nil { return err } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go index df887f9450..07175cc103 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go @@ -35,3 +35,13 @@ func IsUnixDomainSocket(filePath string) (bool, error) { } return true, nil } + +// Chmod is the same as os.Chmod on Linux. +func Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +// MkdirAll is the same as os.MkdirAll on Linux. 
+func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go index cd6a11ed30..2365e6e7d8 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go @@ -27,6 +27,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" + + "golang.org/x/sys/windows" ) const ( @@ -85,3 +87,157 @@ func IsUnixDomainSocket(filePath string) (bool, error) { } return true, nil } + +// On Windows os.Mkdir all doesn't set any permissions so call the Chown function below to set +// permissions once the directory is created. +func MkdirAll(path string, perm os.FileMode) error { + klog.V(6).InfoS("Function MkdirAll starts", "path", path, "perm", perm) + err := os.MkdirAll(path, perm) + if err != nil { + return fmt.Errorf("Error creating directory %s: %v", path, err) + } + + err = Chmod(path, perm) + if err != nil { + return fmt.Errorf("Error setting permissions for directory %s: %v", path, err) + } + + return nil +} + +const ( + // These aren't defined in the syscall package for Windows :( + USER_READ = 0x100 + USER_WRITE = 0x80 + USER_EXECUTE = 0x40 + GROUP_READ = 0x20 + GROUP_WRITE = 0x10 + GROUP_EXECUTE = 0x8 + OTHERS_READ = 0x4 + OTHERS_WRITE = 0x2 + OTHERS_EXECUTE = 0x1 + USER_ALL = USER_READ | USER_WRITE | USER_EXECUTE + GROUP_ALL = GROUP_READ | GROUP_WRITE | GROUP_EXECUTE + OTHERS_ALL = OTHERS_READ | OTHERS_WRITE | OTHERS_EXECUTE +) + +// On Windows os.Chmod only sets the read-only flag on files, so we need to use Windows APIs to set the desired access on files / directories. +// The OWNER mode will set file permissions for the file owner SID, the GROUP mode will set file permissions for the file group SID, +// and the OTHERS mode will set file permissions for BUILTIN\Users. +// Please note that Windows containers can be run as one of two user accounts; ContainerUser or ContainerAdministrator. +// Containers run as ContainerAdministrator will inherit permissions from BUILTIN\Administrators, +// while containers run as ContainerUser will inherit permissions from BUILTIN\Users. +// Windows containers do not have the ability to run as a custom user account that is known to the host so the OTHERS group mode +// is used to grant / deny permissions of files on the hosts to the ContainerUser account. 
+func Chmod(path string, filemode os.FileMode) error { + klog.V(6).InfoS("Function Chmod starts", "path", path, "filemode", filemode) + // Get security descriptor for the file + sd, err := windows.GetNamedSecurityInfo( + path, + windows.SE_FILE_OBJECT, + windows.DACL_SECURITY_INFORMATION|windows.PROTECTED_DACL_SECURITY_INFORMATION|windows.OWNER_SECURITY_INFORMATION|windows.GROUP_SECURITY_INFORMATION) + if err != nil { + return fmt.Errorf("Error getting security descriptor for file %s: %v", path, err) + } + + // Get owner SID from the security descriptor for assigning USER permissions + owner, _, err := sd.Owner() + if err != nil { + return fmt.Errorf("Error getting owner SID for file %s: %v", path, err) + } + ownerString := owner.String() + + // Get the group SID from the security descriptor for assigning GROUP permissions + group, _, err := sd.Group() + if err != nil { + return fmt.Errorf("Error getting group SID for file %s: %v", path, err) + } + groupString := group.String() + + mask := uint32(windows.ACCESS_MASK(filemode)) + + // Build a new Discretionary Access Control List (DACL) with the desired permissions using + //the Security Descriptor Definition Language (SDDL) format. + // https://learn.microsoft.com/windows/win32/secauthz/security-descriptor-definition-language + // the DACL is a list of Access Control Entries (ACEs) where each ACE represents the permissions (Allow or Deny) for a specific SID. + // Each ACE has the following format: + // (AceType;AceFlags;Rights;ObjectGuid;InheritObjectGuid;AccountSid) + // We can leave ObjectGuid and InheritObjectGuid empty for our purposes. + + dacl := "D:" + + // build the owner ACE + dacl += "(A;OICI;" + if mask&USER_ALL == USER_ALL { + dacl += "FA" + } else { + if mask&USER_READ == USER_READ { + dacl += "FR" + } + if mask&USER_WRITE == USER_WRITE { + dacl += "FW" + } + if mask&USER_EXECUTE == USER_EXECUTE { + dacl += "FX" + } + } + dacl += ";;;" + ownerString + ")" + + // Build the group ACE + dacl += "(A;OICI;" + if mask&GROUP_ALL == GROUP_ALL { + dacl += "FA" + } else { + if mask&GROUP_READ == GROUP_READ { + dacl += "FR" + } + if mask&GROUP_WRITE == GROUP_WRITE { + dacl += "FW" + } + if mask&GROUP_EXECUTE == GROUP_EXECUTE { + dacl += "FX" + } + } + dacl += ";;;" + groupString + ")" + + // Build the others ACE + dacl += "(A;OICI;" + if mask&OTHERS_ALL == OTHERS_ALL { + dacl += "FA" + } else { + if mask&OTHERS_READ == OTHERS_READ { + dacl += "FR" + } + if mask&OTHERS_WRITE == OTHERS_WRITE { + dacl += "FW" + } + if mask&OTHERS_EXECUTE == OTHERS_EXECUTE { + dacl += "FX" + } + } + dacl += ";;;BU)" + + klog.V(6).InfoS("Setting new DACL for path", "path", path, "dacl", dacl) + + // create a new security descriptor from the DACL string + newSD, err := windows.SecurityDescriptorFromString(dacl) + if err != nil { + return fmt.Errorf("Error creating new security descriptor from DACL string: %v", err) + } + + // get the DACL in binary format from the newly created security descriptor + newDACL, _, err := newSD.DACL() + if err != nil { + return fmt.Errorf("Error getting DACL from new security descriptor: %v", err) + } + + // Write the new security descriptor to the file + return windows.SetNamedSecurityInfo( + path, + windows.SE_FILE_OBJECT, + windows.DACL_SECURITY_INFORMATION|windows.PROTECTED_DACL_SECURITY_INFORMATION, + nil, // owner SID + nil, // group SID + newDACL, + nil) // SACL +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index 94c2330afc..dcccb56f10 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -1064,7 +1064,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod { Containers: []v1.Container{ { Name: "pv-recycler", - Image: "registry.k8s.io/build-image/debian-base:bookworm-v1.0.0", + Image: "registry.k8s.io/build-image/debian-base:bookworm-v1.0.3", Command: []string{"/bin/sh"}, Args: []string{"-c", "test -e /scrub && find /scrub -mindepth 1 -delete && test -z \"$(ls -A /scrub)\" || exit 1"}, VolumeMounts: []v1.VolumeMount{ diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go b/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go index af309353ba..238e919b33 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go @@ -102,6 +102,23 @@ func IsFailedPreconditionError(err error) bool { return errors.As(err, &failedPreconditionError) } +type OperationNotSupported struct { + msg string +} + +func (err *OperationNotSupported) Error() string { + return err.msg +} + +func NewOperationNotSupportedError(msg string) *OperationNotSupported { + return &OperationNotSupported{msg: msg} +} + +func IsOperationNotSupportedError(err error) bool { + var operationNotSupportedError *OperationNotSupported + return errors.As(err, &operationNotSupportedError) +} + // TransientOperationFailure indicates operation failed with a transient error // and may fix itself when retried. type TransientOperationFailure struct { diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml index ab558f9147..019d7cfc7b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml @@ -1,5 +1,5 @@ # This DaemonSet was originally referenced from -# https://github.com/GoogleCloudPlatform/container-engine-accelerators/blob/master/daemonset.yaml +# https://github.com/GoogleCloudPlatform/container-engine-accelerators/blob/master/nvidia-driver-installer/cos/daemonset-preloaded.yaml # The Dockerfile and other source for this daemonset are in # https://github.com/GoogleCloudPlatform/cos-gpu-installer @@ -47,17 +47,43 @@ spec: - name: root-mount hostPath: path: / + - name: cos-tools + hostPath: + path: /var/lib/cos-tools + - name: nvidia-config + hostPath: + path: /etc/nvidia initContainers: + - image: "ubuntu@sha256:3f85b7caad41a95462cf5b787d8a04604c8262cdcdf9a472b8c52ef83375fe15" + name: bind-mount-install-dir + securityContext: + privileged: true + command: + - nsenter + - -at + - '1' + - -- + - sh + - -c + - | + if mountpoint -q /var/lib/nvidia; then + echo "The mountpoint /var/lib/nvidia exists." + else + echo "The mountpoint /var/lib/nvidia does not exist. Creating directories /home/kubernetes/bin/nvidia and /var/lib/nvidia and bind mount." + mkdir -p /var/lib/nvidia /home/kubernetes/bin/nvidia + mount --bind /home/kubernetes/bin/nvidia /var/lib/nvidia + echo "Done creating bind mounts" + fi # The COS GPU installer image version may be dependent on the version of COS being used. 
# Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/ # and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine version COS GPU installer for a given version of COS. - # Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.1.9 - suitable for COS M109 as per https://cloud.google.com/container-optimized-os/docs/release-notes - - image: gcr.io/cos-cloud/cos-gpu-installer:v2.1.9 + # Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.1.10 - suitable for COS M109 as per https://cloud.google.com/container-optimized-os/docs/release-notes + - image: "gcr.io/cos-cloud/cos-gpu-installer:v2.1.10" name: nvidia-driver-installer resources: requests: - cpu: 0.15 + cpu: 150m securityContext: privileged: true env: @@ -71,6 +97,10 @@ spec: value: /etc/vulkan/icd.d - name: ROOT_MOUNT_DIR value: /root + - name: COS_TOOLS_DIR_HOST + value: /var/lib/cos-tools + - name: COS_TOOLS_DIR_CONTAINER + value: /build/cos-tools volumeMounts: - name: nvidia-install-dir-host mountPath: /usr/local/nvidia @@ -80,6 +110,37 @@ spec: mountPath: /dev - name: root-mount mountPath: /root + - name: cos-tools + mountPath: /build/cos-tools + command: + - bash + - -c + - | + echo "Checking for existing GPU driver modules" + if lsmod | grep nvidia; then + echo "GPU driver is already installed, the installed version may or may not be the driver version being tried to install, skipping installation" + exit 0 + else + echo "No GPU driver module detected, installing now" + /cos-gpu-installer install + fi + - image: "gcr.io/gke-release/nvidia-partition-gpu@sha256:e226275da6c45816959fe43cde907ee9a85c6a2aa8a429418a4cadef8ecdb86a" + name: partition-gpus + env: + - name: LD_LIBRARY_PATH + value: /usr/local/nvidia/lib64 + resources: + requests: + cpu: 150m + securityContext: + privileged: true + volumeMounts: + - name: nvidia-install-dir-host + mountPath: /usr/local/nvidia + - name: dev + mountPath: /dev + - name: nvidia-config + mountPath: /etc/nvidia containers: - image: "registry.k8s.io/pause:3.9" name: pause diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml index e47e6fed26..3d33dfc066 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v3.4.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v4.5.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml index 8f806887f4..bdd93b894d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml +++ 
b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.4.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.11.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml index ba65d9e7f5..d80b5d793b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v3.1.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v4.0.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external @@ -61,6 +61,13 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch"] + # (Alpha) Access to referencegrants is only needed when the CSI driver + # has the CrossNamespaceVolumeDataSource controller capability. + # In that case, external-provisioner requires "get", "list", "watch" + # permissions for "referencegrants" on "gateway.networking.k8s.io". + #- apiGroups: ["gateway.networking.k8s.io"] + # resources: ["referencegrants"] + # verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding @@ -89,9 +96,6 @@ metadata: rules: # Only one of the following rules for endpoints or leases is required based on # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases. 
-- apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml index 410d14ff90..4f8f7980d4 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.4.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.10.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external @@ -46,6 +46,10 @@ rules: - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] + # only required if enabling the alpha volume modify feature + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattributesclasses"] + verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding @@ -63,7 +67,7 @@ roleRef: apiGroup: rbac.authorization.k8s.io --- -# Resizer must be able to work with end point in current namespace +# Resizer must be able to work with `leases` in current namespace # if (and only if) leadership election is enabled kind: Role apiVersion: rbac.authorization.k8s.io/v1 diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml index 335538d44b..7b5f5ad34d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v5.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v7.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # Together with the RBAC file for external-provisioner, this YAML file @@ -12,6 +12,7 @@ # - optionally rename the non-namespaced ClusterRole if there # are conflicts with other deployments +--- apiVersion: v1 kind: ServiceAccount metadata: @@ -37,13 +38,24 @@ rules: - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch", "create"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + verbs: ["get", "list", "watch", "update", "patch", "create"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] 
verbs: ["update", "patch"] - + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents/status"] + verbs: ["update", "patch"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md index 399316db2b..ce3a7694e1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md @@ -1,4 +1,4 @@ The files in this directory are exact copies of "kubernetes-latest" in -https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/ +https://github.com/kubernetes-csi/csi-driver-host-path/tree/release-1.13/deploy/ Do not edit manually. Run ./update-hostpath.sh to refresh the content. diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml index c8cf666a47..0250a52c1a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml @@ -15,3 +15,6 @@ spec: # To determine at runtime which mode a volume uses, pod info and its # "csi.storage.k8s.io/ephemeral" entry are needed. podInfoOnMount: true + # Kubernetes may use fsGroup to change permissions and ownership + # of the volume to match user requested fsGroup in the pod's SecurityPolicy + fsGroupPolicy: File diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml index 6a41a02391..eb4c163484 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml @@ -1,4 +1,4 @@ -# All of the individual sidecar RBAC roles get bound + # All of the individual sidecar RBAC roles get bound # to this account. 
kind: ServiceAccount apiVersion: v1 @@ -190,6 +190,7 @@ kind: StatefulSet apiVersion: apps/v1 metadata: name: csi-hostpathplugin + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -218,7 +219,7 @@ spec: serviceAccountName: csi-hostpathplugin-sa containers: - name: hostpath - image: registry.k8s.io/sig-storage/hostpathplugin:v1.11.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - "--drivername=hostpath.csi.k8s.io" - "--v=5" @@ -261,7 +262,7 @@ spec: name: dev-dir - name: csi-external-health-monitor-controller - image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0 + image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.11.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" @@ -275,7 +276,7 @@ spec: mountPath: /csi - name: node-driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -303,13 +304,13 @@ spec: volumeMounts: - mountPath: /csi name: socket-dir - image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0 + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 args: - --csi-address=/csi/csi.sock - --health-port=9898 - name: csi-attacher - image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0 + image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -323,11 +324,12 @@ spec: name: socket-dir - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - -v=5 - --csi-address=/csi/csi.sock - --feature-gates=Topology=true + # end csi-provisioner args securityContext: # This is necessary only for systems with SELinux, where # non-privileged sidecar containers cannot access unix domain socket @@ -338,7 +340,7 @@ spec: name: socket-dir - name: csi-resizer - image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0 + image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0 args: - -v=5 - -csi-address=/csi/csi.sock @@ -352,7 +354,7 @@ spec: name: socket-dir - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1 args: - -v=5 - --csi-address=/csi/csi.sock diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml index 7253a70a9d..02e5e8d7c1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml @@ -11,6 +11,7 @@ apiVersion: v1 kind: Service metadata: name: hostpath-service + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -30,6 +31,7 @@ kind: StatefulSet apiVersion: apps/v1 metadata: name: csi-hostpath-socat + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -64,7 +66,9 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: socat - image: docker.io/alpine/socat:1.7.4.3-r0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 + command: + - socat args: - 
tcp-listen:10000,fork,reuseaddr - unix-connect:/csi/csi.sock diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml index 7684aea78c..b17687c617 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-attacher - image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0 + image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0 args: - --v=5 - --csi-address=$(ADDRESS) diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml index 242c0b5aa7..ef85e3170f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-resizer - image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0 + image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml index f788a4a8f2..6718060ec9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1 args: - "--v=5" - "--csi-address=$(ADDRESS)" diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml index 4493deccc0..3035532c5d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - "--csi-address=$(ADDRESS)" # Topology support is needed for the pod rescheduling test @@ -34,7 +34,7 @@ spec: - mountPath: /csi name: socket-dir - name: driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -53,7 +53,7 @@ spec: - mountPath: /registration name: registration-dir - name: mock - image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - "--drivername=mock.storage.k8s.io" - "--nodeid=$(KUBE_NODE_NAME)" diff --git 
a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml index d1aa8ece86..189c17af67 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - "--csi-address=$(ADDRESS)" # Topology support is needed for the pod rescheduling test @@ -35,7 +35,7 @@ spec: - mountPath: /csi name: socket-dir - name: driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -53,7 +53,7 @@ spec: - mountPath: /registration name: registration-dir - name: mock - image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - -v=5 - -nodeid=$(KUBE_NODE_NAME) diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh index ce60b39bc3..122de0d18b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh @@ -137,5 +137,5 @@ done grep -r image: hostpath/hostpath/csi-hostpath-plugin.yaml | while read -r image; do version=$(echo "$image" | sed -e 's/.*:\(.*\)/\1/') image=$(echo "$image" | sed -e 's/.*image: \([^:]*\).*/\1/') - sed -i -e "s;$image:.*;$image:$version;" mock/*.yaml + sed -i '' -e "s;$image:.*;$image:$version;" mock/*.yaml done diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 4175c93940..4e4118876e 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -232,7 +232,7 @@ const ( func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) { configs := map[ImageID]Config{} - configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.45"} + configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.47"} configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"} configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"} configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"} @@ -241,8 +241,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"} configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"} configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.4"} - configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.10-0"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.5.9"} + configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.16-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} 
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"} diff --git a/vendor/k8s.io/mount-utils/mount_helper_unix.go b/vendor/k8s.io/mount-utils/mount_helper_unix.go index 9193e7c8d2..1c603dca7a 100644 --- a/vendor/k8s.io/mount-utils/mount_helper_unix.go +++ b/vendor/k8s.io/mount-utils/mount_helper_unix.go @@ -61,7 +61,13 @@ func IsCorruptedMnt(err error) bool { underlyingError = err } - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES || underlyingError == syscall.EHOSTDOWN || underlyingError == syscall.EWOULDBLOCK + return errors.Is(underlyingError, syscall.ENOTCONN) || + errors.Is(underlyingError, syscall.ESTALE) || + errors.Is(underlyingError, syscall.EIO) || + errors.Is(underlyingError, syscall.EACCES) || + errors.Is(underlyingError, syscall.EHOSTDOWN) || + errors.Is(underlyingError, syscall.EWOULDBLOCK) || + errors.Is(underlyingError, syscall.ENODEV) } // MountInfo represents a single line in /proc/<pid>/mountinfo. diff --git a/vendor/modules.txt b/vendor/modules.txt index 736b2e1ea5..bd238c2eea 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -285,7 +285,7 @@ github.com/opencontainers/go-digest ## explicit; go 1.19 github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/oracle/oci-go-sdk/v65 v65.77.0 +# github.com/oracle/oci-go-sdk/v65 v65.79.0 ## explicit; go 1.13 github.com/oracle/oci-go-sdk/v65/common github.com/oracle/oci-go-sdk/v65/common/auth @@ -419,6 +419,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry ## explicit; go 1.20 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded +go.opentelemetry.io/otel/metric/noop # go.opentelemetry.io/otel/sdk v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/sdk @@ -471,7 +472,7 @@ golang.org/x/exp/slices # golang.org/x/mod v0.14.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.21.0 +# golang.org/x/net v0.23.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -699,7 +700,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.1 => k8s.io/api v0.29.1 +# k8s.io/api v0.29.11 => k8s.io/api v0.29.11 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -755,7 +756,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.29.1 => k8s.io/apiextensions-apiserver v0.29.1 +# k8s.io/apiextensions-apiserver v0.29.11 => k8s.io/apiextensions-apiserver v0.29.11 ## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -767,7 +768,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.29.1 => k8s.io/apimachinery v0.29.1 +# k8s.io/apimachinery v0.29.11 => k8s.io/apimachinery v0.29.11 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -830,7 +831,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.1 => k8s.io/apiserver v0.29.1 +# k8s.io/apiserver v0.29.11 => k8s.io/apiserver v0.29.11 ## explicit; go 1.21 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -978,7 +979,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v1.5.2 => k8s.io/client-go v0.29.1 +# k8s.io/client-go v1.5.2 => k8s.io/client-go v0.29.11 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1304,7 +1305,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.29.1 => k8s.io/cloud-provider v0.29.1 +# k8s.io/cloud-provider v0.29.11 => k8s.io/cloud-provider v0.29.11 ## explicit; go 1.21 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1327,7 +1328,7 @@ k8s.io/cloud-provider/options k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.29.1 => k8s.io/component-base v0.29.1 +# k8s.io/component-base v0.29.11 => k8s.io/component-base v0.29.11 ## explicit; go 1.21 k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -1356,14 +1357,14 @@ k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.29.1 => k8s.io/component-helpers v0.29.1 +# k8s.io/component-helpers v0.29.11 => k8s.io/component-helpers v0.29.11 ## explicit; go 1.21 k8s.io/component-helpers/node/topology k8s.io/component-helpers/node/util k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity -# k8s.io/controller-manager v0.29.1 => k8s.io/controller-manager v0.29.1 +# k8s.io/controller-manager v0.29.11 => k8s.io/controller-manager v0.29.11 ## explicit; go 1.21 k8s.io/controller-manager/app k8s.io/controller-manager/config @@ -1380,7 +1381,7 @@ k8s.io/controller-manager/pkg/informerfactory k8s.io/controller-manager/pkg/leadermigration k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options -# k8s.io/csi-translation-lib v0.29.1 => k8s.io/csi-translation-lib v0.29.1 +# k8s.io/csi-translation-lib v0.29.11 => k8s.io/csi-translation-lib v0.29.11 ## explicit; go 1.21 # k8s.io/klog v1.0.0 ## explicit; go 1.12 @@ -1394,7 +1395,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kms v0.29.1 => k8s.io/kms v0.29.1 +# k8s.io/kms v0.29.11 => k8s.io/kms v0.29.11 ## explicit; go 1.21 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 @@ -1421,16 +1422,16 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.29.1 +# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.29.11 ## explicit; go 1.21 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.29.1 => k8s.io/kubectl v0.29.1 +# k8s.io/kubectl v0.29.11 => k8s.io/kubectl v0.29.11 ## explicit; go 1.21 
k8s.io/kubectl/pkg/scale -# k8s.io/kubelet v0.29.1 => k8s.io/kubelet v0.29.1 +# k8s.io/kubelet v0.29.11 => k8s.io/kubelet v0.29.11 ## explicit; go 1.21 k8s.io/kubelet/pkg/apis -# k8s.io/kubernetes v1.29.1 => k8s.io/kubernetes v1.29.1 +# k8s.io/kubernetes v1.29.11 => k8s.io/kubernetes v1.29.11 ## explicit; go 1.21 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1489,7 +1490,7 @@ k8s.io/kubernetes/test/e2e/framework/testfiles k8s.io/kubernetes/test/e2e/testing-manifests k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/image -# k8s.io/mount-utils v0.29.1 => k8s.io/mount-utils v0.29.1 +# k8s.io/mount-utils v0.29.11 => k8s.io/mount-utils v0.29.11 ## explicit; go 1.21 k8s.io/mount-utils # k8s.io/utils v0.0.0-20230726121419-3b25d923346b @@ -1511,7 +1512,7 @@ k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.3 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 ## explicit; go 1.20 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics @@ -1540,32 +1541,24 @@ sigs.k8s.io/yaml/goyaml.v2 # github.com/docker/docker => github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb # github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0 # google.golang.org/grpc => google.golang.org/grpc v1.60.1 -# k8s.io/api => k8s.io/api v0.29.1 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.1 -# k8s.io/apimachinery => k8s.io/apimachinery v0.29.1 -# k8s.io/apiserver => k8s.io/apiserver v0.29.1 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.1 -# k8s.io/client-go => k8s.io/client-go v0.29.1 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.1 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.1 -# k8s.io/code-generator => k8s.io/code-generator v0.29.1 -# k8s.io/component-base => k8s.io/component-base v0.29.1 -# k8s.io/component-helpers => k8s.io/component-helpers v0.29.1 -# k8s.io/controller-manager => k8s.io/controller-manager v0.29.1 -# k8s.io/cri-api => k8s.io/cri-api v0.29.1 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.1 -# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.1 +# k8s.io/api => k8s.io/api v0.29.11 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.11 +# k8s.io/apimachinery => k8s.io/apimachinery v0.29.11 +# k8s.io/apiserver => k8s.io/apiserver v0.29.11 +# k8s.io/client-go => k8s.io/client-go v0.29.11 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.11 +# k8s.io/component-base => k8s.io/component-base v0.29.11 +# k8s.io/component-helpers => k8s.io/component-helpers v0.29.11 +# k8s.io/controller-manager => k8s.io/controller-manager v0.29.11 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.11 +# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.11 # k8s.io/endpointslice => k8s.io/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20230810203337-add7e14df11e -# k8s.io/kms => k8s.io/kms v0.29.1 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.1 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.1 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.1 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.1 -# k8s.io/kubectl => k8s.io/kubectl v0.29.1 -# k8s.io/kubelet => k8s.io/kubelet v0.29.1 -# k8s.io/kubernetes => k8s.io/kubernetes v1.29.1 -# 
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.1 -# k8s.io/metrics => k8s.io/metrics v0.29.1 -# k8s.io/mount-utils => k8s.io/mount-utils v0.29.1 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.1 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.1 +# k8s.io/kms => k8s.io/kms v0.29.11 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.11 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.11 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.11 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.11 +# k8s.io/kubectl => k8s.io/kubectl v0.29.11 +# k8s.io/kubelet => k8s.io/kubelet v0.29.11 +# k8s.io/kubernetes => k8s.io/kubernetes v1.29.11 +# k8s.io/mount-utils => k8s.io/mount-utils v0.29.11
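
Note on the vendored k8s.io/mount-utils hunk above: it replaces the chain of plain == comparisons in IsCorruptedMnt with errors.Is, so the corruption errnos are still recognised when they reach the helper wrapped (for example inside an *os.PathError returned by a stat on a broken mount), and syscall.ENODEV is added to the recognised set. The sketch below is an illustration only, not part of the vendored change; the isCorrupted helper and the /mnt/vol path are made up for the example.

    package main

    import (
        "errors"
        "fmt"
        "os"
        "syscall"
    )

    // isCorrupted mirrors the updated vendored check: errors.Is walks the
    // wrap chain, so an errno buried inside an *os.PathError still matches.
    func isCorrupted(err error) bool {
        return errors.Is(err, syscall.ENOTCONN) ||
            errors.Is(err, syscall.ESTALE) ||
            errors.Is(err, syscall.EIO) ||
            errors.Is(err, syscall.EACCES) ||
            errors.Is(err, syscall.EHOSTDOWN) ||
            errors.Is(err, syscall.EWOULDBLOCK) ||
            errors.Is(err, syscall.ENODEV)
    }

    func main() {
        // A wrapped errno, e.g. what a stat on a stale FUSE/NFS mount can return.
        var err error = &os.PathError{Op: "stat", Path: "/mnt/vol", Err: syscall.ENOTCONN}

        fmt.Println(err == error(syscall.ENOTCONN))   // false: plain equality misses the wrapper
        fmt.Println(errors.Is(err, syscall.ENOTCONN)) // true: errors.Is unwraps the chain
        fmt.Println(isCorrupted(err))                 // true: treated as a corrupted mount
    }

Callers that wrap errors with fmt.Errorf("...: %w", err) keep matching under the errors.Is form, which is presumably why the vendored code moved away from direct equality.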