From 52d1afc96517e1b6ee06e510eb7d7cd67a163894 Mon Sep 17 00:00:00 2001 From: l-technicore Date: Thu, 28 Nov 2024 11:25:54 +0530 Subject: [PATCH] Release 1.28.2 Security patching, updated Makefile, Readme and Third Party Licences --- .github/workflows/makefile.yml | 9 +- .github/workflows/release.yaml | 4 +- Dockerfile | 9 +- Dockerfile_arm_all | 9 +- Makefile | 2 +- README.md | 11 +- THIRD_PARTY_LICENSES.txt | 12 +- go.mod | 90 ++-- go.sum | 76 +-- .../oci-cloud-controller-manager.yaml | 2 +- .../oci-csi-controller-driver.yaml | 2 +- .../oci-csi-node-driver.yaml | 2 +- .../oci-csi-node-rbac.yaml | 3 + .../oci-flexvolume-driver.yaml | 4 +- .../oci-volume-provisioner-fss.yaml | 2 +- .../oci-volume-provisioner.yaml | 2 +- pkg/cloudprovider/providers/oci/instances.go | 2 +- pkg/oci/client/client.go | 70 +-- pkg/oci/client/client_factory.go | 4 +- pkg/oci/client/identity.go | 18 + pkg/volume/provisioner/core/provisioner.go | 2 +- test/e2e/cloud-provider-oci/fss_dynamic.go | 2 +- vendor/golang.org/x/net/http2/frame.go | 31 ++ vendor/golang.org/x/net/http2/pipe.go | 11 +- vendor/golang.org/x/net/http2/server.go | 13 +- vendor/golang.org/x/net/http2/testsync.go | 331 ++++++++++++ vendor/golang.org/x/net/http2/transport.go | 307 ++++++++--- vendor/golang.org/x/net/websocket/client.go | 55 +- vendor/golang.org/x/net/websocket/dial.go | 11 +- vendor/golang.org/x/sys/execabs/execabs.go | 102 ---- .../golang.org/x/sys/execabs/execabs_go118.go | 17 - .../golang.org/x/sys/execabs/execabs_go119.go | 20 - .../tools/go/internal/packagesdriver/sizes.go | 24 +- vendor/golang.org/x/tools/go/packages/doc.go | 36 +- .../x/tools/go/packages/external.go | 2 +- .../golang.org/x/tools/go/packages/golist.go | 92 +--- .../x/tools/go/packages/golist_overlay.go | 492 ------------------ .../x/tools/go/packages/packages.go | 189 +++---- .../x/tools/go/types/objectpath/objectpath.go | 130 ++--- .../x/tools/internal/event/keys/util.go | 21 + .../x/tools/internal/gcimporter/gcimporter.go | 3 +- .../x/tools/internal/gcimporter/iexport.go | 31 +- .../x/tools/internal/gcimporter/iimport.go | 39 +- .../x/tools/internal/gocommand/invoke.go | 27 +- .../internal/packagesinternal/packages.go | 8 - .../x/tools/internal/typeparams/common.go | 24 +- .../x/tools/internal/typeparams/coretype.go | 16 +- .../internal/typeparams/enabled_go117.go | 12 - .../internal/typeparams/enabled_go118.go | 15 - .../x/tools/internal/typeparams/normalize.go | 20 +- .../x/tools/internal/typeparams/termlist.go | 2 +- .../internal/typeparams/typeparams_go117.go | 197 ------- .../internal/typeparams/typeparams_go118.go | 151 ------ .../x/tools/internal/typeparams/typeterm.go | 9 +- .../x/tools/internal/typesinternal/types.go | 16 - .../x/tools/internal/versions/gover.go | 172 ++++++ .../x/tools/internal/versions/types.go | 19 + .../x/tools/internal/versions/types_go121.go | 20 + .../x/tools/internal/versions/types_go122.go | 24 + .../tools/internal/versions/versions_go121.go | 49 ++ .../tools/internal/versions/versions_go122.go | 38 ++ .../admissionregistration/v1alpha1/types.go | 4 +- .../admissionregistration/v1beta1/types.go | 4 +- vendor/k8s.io/api/batch/v1/types.go | 6 +- .../pkg/admission/plugin/cel/composition.go | 2 +- .../controller_reconcile.go | 5 +- .../endpoints/discovery/aggregated/handler.go | 2 +- .../apiserver/pkg/features/kube_features.go | 17 + vendor/k8s.io/apiserver/pkg/server/config.go | 2 +- .../server/filters/priority-and-fairness.go | 70 ++- .../pkg/server/options/recommended.go | 1 - .../apiserver/pkg/storage/cacher/cacher.go | 24 
+- .../pkg/storage/cacher/lister_watcher.go | 30 +- .../pkg/storage/cacher/watch_cache.go | 10 +- .../pkg/storage/cacher/watch_progress.go | 9 +- .../pkg/storage/etcd3/metrics/metrics.go | 12 +- .../pkg/util/flowcontrol/apf_controller.go | 34 +- .../pkg/util/flowcontrol/apf_filter.go | 5 - .../util/flowcontrol/fairqueuing/interface.go | 7 +- .../fairqueuing/queueset/queueset.go | 77 +-- .../pkg/util/flowcontrol/metrics/metrics.go | 16 + vendor/k8s.io/cloud-provider/cloud.go | 7 +- .../node_lifecycle_controller.go | 67 ++- .../metrics/prometheus/slis/metrics.go | 1 + .../pkg/apis/core/validation/validation.go | 42 +- .../kubernetes/pkg/features/kube_features.go | 9 +- .../pkg/scheduler/framework/interface.go | 5 +- .../pkg/scheduler/framework/types.go | 6 + .../kubernetes/pkg/volume/util/types/types.go | 17 + .../scheduling/nvidia-driver-installer.yaml | 69 ++- .../storage-csi/external-attacher/rbac.yaml | 4 +- .../rbac.yaml | 4 +- .../external-provisioner/rbac.yaml | 14 +- .../storage-csi/external-resizer/rbac.yaml | 10 +- .../csi-snapshotter/rbac-csi-snapshotter.yaml | 20 +- .../storage-csi/hostpath/README.md | 2 +- .../hostpath/csi-hostpath-driverinfo.yaml | 3 + .../hostpath/csi-hostpath-plugin.yaml | 20 +- .../hostpath/csi-hostpath-testing.yaml | 6 +- .../mock/csi-mock-driver-attacher.yaml | 2 +- .../mock/csi-mock-driver-resizer.yaml | 2 +- .../mock/csi-mock-driver-snapshotter.yaml | 2 +- .../storage-csi/mock/csi-mock-driver.yaml | 6 +- .../storage-csi/mock/csi-mock-proxy.yaml | 6 +- .../storage-csi/update-hostpath.sh | 2 +- .../kubernetes/test/utils/image/manifest.go | 6 +- .../k8s.io/mount-utils/mount_helper_unix.go | 8 +- vendor/modules.txt | 98 ++-- 108 files changed, 1955 insertions(+), 1933 deletions(-) create mode 100644 vendor/golang.org/x/net/http2/testsync.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/util.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go create mode 100644 vendor/golang.org/x/tools/internal/versions/gover.go create mode 100644 vendor/golang.org/x/tools/internal/versions/types.go create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go create mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go121.go create mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go122.go diff --git a/.github/workflows/makefile.yml b/.github/workflows/makefile.yml index ad30f6ef3c..39793ff8f2 100644 --- a/.github/workflows/makefile.yml +++ b/.github/workflows/makefile.yml @@ -17,13 +17,16 @@ jobs: - name: Check out code into the Go module directory uses: actions/checkout@v2 - + - name: Install dependencies + run: | + go mod download - name: Run Unit Tests run: | go test -covermode=count -coverprofile=profile.cov ./pkg/... 
+ - name: Install goveralls + run: go install github.com/mattn/goveralls@latest - name: Send coverage env: COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - GO111MODULE=off go get github.com/mattn/goveralls - $(go env GOPATH)/bin/goveralls -coverprofile=profile.cov -service=github + goveralls -coverprofile=profile.cov -service=github diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 56591c42cb..3e569eaf20 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -26,7 +26,7 @@ jobs: run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${GITHUB_ACTOR,,} --password-stdin - name: Build Image - run: OSS_REGISTRY="ghcr.io/oracle" VERSION="${{ github.ref_name }}" make image + run: OSS_REGISTRY="ghcr.io/${GITHUB_REPOSITORY_OWNER,,}" VERSION="${{ github.ref_name }}" make image - name: Push Image - run: OSS_REGISTRY="ghcr.io/oracle" VERSION="${{ github.ref_name }}" make docker-push-all + run: OSS_REGISTRY="ghcr.io/${GITHUB_REPOSITORY_OWNER,,}" VERSION="${{ github.ref_name }}" make docker-push-all diff --git a/Dockerfile b/Dockerfile index d3851d7e0c..7a2b452559 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,15 +27,14 @@ WORKDIR $SRC RUN COMPONENT=${COMPONENT} make clean build -FROM oraclelinux:7-slim +FROM ghcr.io/oracle/oraclelinux:8-slim-fips COPY --from=0 /go/src/github.com/oracle/oci-cloud-controller-manager/dist/* /usr/local/bin/ COPY --from=0 /go/src/github.com/oracle/oci-cloud-controller-manager/image/* /usr/local/bin/ -RUN yum install -y util-linux \ - && yum install -y e2fsprogs \ - && yum install -y xfsprogs \ - && yum clean all +RUN microdnf -y install util-linux e2fsprogs xfsprogs python2 && \ + microdnf update && \ + microdnf clean all COPY scripts/encrypt-mount /sbin/encrypt-mount COPY scripts/encrypt-umount /sbin/encrypt-umount diff --git a/Dockerfile_arm_all b/Dockerfile_arm_all index 37aefcf5a7..13e4d85d45 100644 --- a/Dockerfile_arm_all +++ b/Dockerfile_arm_all @@ -14,12 +14,11 @@ WORKDIR $SRC RUN ARCH=arm make clean build-arm-all -FROM arm64v8/oraclelinux:7-slim +FROM ghcr.io/oracle/oraclelinux:8-slim-fips-arm64v8 -RUN yum install -y util-linux \ - && yum install -y e2fsprogs \ - && yum clean all - \ +RUN microdnf -y install util-linux e2fsprogs xfsprogs python2 && \ + microdnf update && \ + microdnf clean all COPY scripts/encrypt-mount /sbin/encrypt-mount COPY scripts/encrypt-umount /sbin/encrypt-umount diff --git a/Makefile b/Makefile index 6482f36bcd..b9cbfdc42b 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ else VERSION ?= ${VERSION} endif -RELEASE = v1.28.1 +RELEASE = v1.28.2 GOOS ?= linux ARCH ?= amd64 diff --git a/README.md b/README.md index 7f1050cc99..9babc62253 100644 --- a/README.md +++ b/README.md @@ -31,14 +31,15 @@ cloud-provider specific code out of the Kubernetes codebase. | v1.23.0 | v1.23 | - | | v1.24.2 | v1.24 | - | | v1.25.2 | v1.25 | - | -| v1.26.3 | v1.26 | - | -| v1.27.2 | v1.27 | - | -| v1.28.0 | v1.28 | - | - +| v1.26.4 | v1.26 | - | +| v1.27.3 | v1.27 | - | +| v1.28.2 | v1.28 | - | +| v1.29.1 | v1.29 | - | +| v1.30.0 | v1.30 | - | Note: -Versions older than v1.27.3 are no longer supported, new features / bug fixes will be available in v1.27.3 and later. +Versions older than v1.28.2 are no longer supported, new features / bug fixes will be available in v1.28.2 and later. 
## Implementation Currently `oci-cloud-controller-manager` implements: diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt index dc6d28c9ec..43bf40faab 100644 --- a/THIRD_PARTY_LICENSES.txt +++ b/THIRD_PARTY_LICENSES.txt @@ -1130,7 +1130,6 @@ SPDX:MIT == Copyright Copyright (c) 2013-2014 Onsi Fakhouri -Copyright (c) 2014 Amit Kumar Gupta --------------------------------- (separator) ---------------------------------- @@ -1263,16 +1262,12 @@ END OF TERMS AND CONDITIONS == Copyright -Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. -Copyright (c) 2013 The Go Authors. All rights reserved. Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2018, 2024, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2023 Oracle and/or its affiliates. Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. -Copyright © 2012-2020 Mat Ryer, Tyler Bunnell and contributors. -Copyright © 2013 The Go Authors. All rights reserved. == Notices Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. @@ -1978,7 +1973,6 @@ go.uber.org/zap SPDX:MIT == Copyright -Copyright (c) "*" Uber Technologies, Inc.") Copyright (c) 2016 Uber Technologies, Inc. Copyright (c) 2016, 2017 Uber Technologies, Inc. Copyright (c) 2016-2017 Uber Technologies, Inc. @@ -2120,6 +2114,7 @@ Copyright 2020 The Go Authors. All rights reserved. Copyright 2021 The Go Authors. All rights reserved. Copyright 2022 The Go Authors. All rights reserved. Copyright 2023 The Go Authors. All rights reserved. +Copyright 2024 The Go Authors. All rights reserved. == Patents Additional IP Rights Grant (Patents) @@ -3018,6 +3013,7 @@ Copyright 2020 The Kubernetes Authors. Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors. Copyright 2023 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. --------------------------------- (separator) ---------------------------------- @@ -4007,5 +4003,5 @@ the Mozilla Public License, v. 2.0. 
=== ATTRIBUTION-HELPER-GENERATED: -=== Attribution helper version: {Major:0 Minor:11 GitVersion: GitCommit: GitTreeState:dirty BuildDate:1970-01-01T00:00:00Z GoVersion:go1.19.3 Compiler:gc Platform:darwin/arm64} -=== License file based on go.mod with md5 sum: f13d103ac6a1fd64d889146a5a1dbc24 +=== Attribution helper version: {Major:0 Minor:11 GitVersion:0.10.0-114-g3747dab9 GitCommit:3747dab92eb29c0dbe6409ffbb824b9ae3a04b87 GitTreeState:dirty BuildDate:2024-02-28T16:52:52Z GoVersion:go1.21.0 Compiler:gc Platform:darwin/amd64} +=== License file based on go.mod with md5 sum: 002d30e85246feb4f9ea96ae4ad1d826 diff --git a/go.mod b/go.mod index 061f37733e..51d762d6ce 100644 --- a/go.mod +++ b/go.mod @@ -6,35 +6,35 @@ replace ( github.com/docker/docker => github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0 google.golang.org/grpc => google.golang.org/grpc v1.61.0 - k8s.io/api => k8s.io/api v0.28.4 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.28.4 - k8s.io/apiserver => k8s.io/apiserver v0.28.4 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.4 - k8s.io/client-go => k8s.io/client-go v0.28.4 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.4 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.4 - k8s.io/code-generator => k8s.io/code-generator v0.28.4 - k8s.io/component-base => k8s.io/component-base v0.28.4 - k8s.io/component-helpers => k8s.io/component-helpers v0.28.4 - k8s.io/controller-manager => k8s.io/controller-manager v0.28.4 - k8s.io/cri-api => k8s.io/cri-api v0.28.4 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.4 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.4 + k8s.io/api => k8s.io/api v0.28.15 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.15 + k8s.io/apimachinery => k8s.io/apimachinery v0.28.15 + k8s.io/apiserver => k8s.io/apiserver v0.28.15 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.15 + k8s.io/client-go => k8s.io/client-go v0.28.15 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.15 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.15 + k8s.io/code-generator => k8s.io/code-generator v0.28.15 + k8s.io/component-base => k8s.io/component-base v0.28.15 + k8s.io/component-helpers => k8s.io/component-helpers v0.28.15 + k8s.io/controller-manager => k8s.io/controller-manager v0.28.15 + k8s.io/cri-api => k8s.io/cri-api v0.28.15 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.15 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.15 k8s.io/endpointslice => k8s.io/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20230810203337-add7e14df11e - k8s.io/kms => k8s.io/kms v0.28.4 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.4 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.4 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.4 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.4 - k8s.io/kubectl => k8s.io/kubectl v0.28.4 - k8s.io/kubelet => k8s.io/kubelet v0.28.4 - k8s.io/kubernetes => k8s.io/kubernetes v1.28.4 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.4 - k8s.io/metrics => k8s.io/metrics v0.28.4 - k8s.io/mount-utils => k8s.io/mount-utils v0.28.4 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.4 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.4 + k8s.io/kms => k8s.io/kms 
v0.28.15 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.15 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.15 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.15 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.15 + k8s.io/kubectl => k8s.io/kubectl v0.28.15 + k8s.io/kubelet => k8s.io/kubelet v0.28.15 + k8s.io/kubernetes => k8s.io/kubernetes v1.28.15 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.15 + k8s.io/metrics => k8s.io/metrics v0.28.15 + k8s.io/mount-utils => k8s.io/mount-utils v0.28.15 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.15 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.15 ) require ( @@ -52,25 +52,25 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.8.1 go.uber.org/zap v1.25.0 - golang.org/x/net v0.21.0 + golang.org/x/net v0.23.0 golang.org/x/sys v0.18.0 // indirect google.golang.org/grpc v1.63.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.28.4 - k8s.io/apimachinery v0.28.4 - k8s.io/apiserver v0.28.4 // indirect - k8s.io/client-go v0.28.4 - k8s.io/cloud-provider v0.28.4 - k8s.io/component-base v0.28.4 - k8s.io/component-helpers v0.28.4 - k8s.io/controller-manager v0.28.4 // indirect - k8s.io/csi-translation-lib v0.28.4 // indirect + k8s.io/api v0.28.15 + k8s.io/apimachinery v0.28.15 + k8s.io/apiserver v0.28.15 // indirect + k8s.io/client-go v0.28.15 + k8s.io/cloud-provider v0.28.15 + k8s.io/component-base v0.28.15 + k8s.io/component-helpers v0.28.15 + k8s.io/controller-manager v0.28.15 // indirect + k8s.io/csi-translation-lib v0.28.15 // indirect k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.100.1 - k8s.io/kubelet v0.28.4 // indirect - k8s.io/kubernetes v1.28.4 - k8s.io/mount-utils v0.28.4 + k8s.io/kubelet v0.28.15 // indirect + k8s.io/kubernetes v1.28.15 + k8s.io/mount-utils v0.28.15 k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 sigs.k8s.io/sig-storage-lib-external-provisioner/v9 v9.1.0-rc.0 ) @@ -167,12 +167,12 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect - golang.org/x/mod v0.12.0 // indirect + golang.org/x/mod v0.14.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.12.0 // indirect + golang.org/x/tools v0.16.1 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect @@ -180,10 +180,10 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - k8s.io/kms v0.28.4 // indirect + k8s.io/kms v0.28.15 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/kube-scheduler v0.0.0 // indirect - k8s.io/kubectl v0.28.4 // indirect + k8s.io/kubectl v0.28.15 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect diff --git a/go.sum b/go.sum index 341742adba..ca610cbf04 100644 --- a/go.sum +++ b/go.sum @@ -1779,8 +1779,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1848,8 +1848,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2110,8 +2110,8 @@ golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= -golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2448,44 +2448,44 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= -k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= -k8s.io/apiextensions-apiserver v0.28.4 h1:AZpKY/7wQ8n+ZYDtNHbAJBb+N4AXXJvyZx6ww6yAJvU= -k8s.io/apiextensions-apiserver v0.28.4/go.mod h1:pgQIZ1U8eJSMQcENew/0ShUTlePcSGFq6dxSxf2mwPM= -k8s.io/apimachinery v0.28.4 
h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= -k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= -k8s.io/apiserver v0.28.4 h1:BJXlaQbAU/RXYX2lRz+E1oPe3G3TKlozMMCZWu5GMgg= -k8s.io/apiserver v0.28.4/go.mod h1:Idq71oXugKZoVGUUL2wgBCTHbUR+FYTWa4rq9j4n23w= -k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= -k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= -k8s.io/cloud-provider v0.28.4 h1:7obmeuJJ5CYTO9HANDqemf/d2v95U+F0t8aeH4jNOsQ= -k8s.io/cloud-provider v0.28.4/go.mod h1:xbhmGZ7wRHgXFP3SNsvdmFRO87KJIvirDYQA5ydMgGA= -k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo= -k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU= -k8s.io/component-helpers v0.28.4 h1:+X9VXT5+jUsRdC26JyMZ8Fjfln7mSjgumafocE509C4= -k8s.io/component-helpers v0.28.4/go.mod h1:8LzMalOQ0K10tkBJWBWq8h0HTI9HDPx4WT3QvTFn9Ro= -k8s.io/controller-manager v0.28.4 h1:8uJmo1pD6fWYk4mC/JfZQU6zPvuCgEHf3pd5G39ldDU= -k8s.io/controller-manager v0.28.4/go.mod h1:pnO+UK2mcWNu1MxucqI8xHPD/8UBm04IUmp2u/3vbnM= -k8s.io/csi-translation-lib v0.28.4 h1:4TrU2zefZGU5HQCyPZvcPxkS6IowqZ/jBs2Qi/dPUpc= -k8s.io/csi-translation-lib v0.28.4/go.mod h1:oxwDdx0hyVqViINOUF7TGrVt51eqsOkQ0BTI+A9QcQs= +k8s.io/api v0.28.15 h1:u+Sze8gI+DayQxndS0htiJf8yVooHyUx/H4jEehtmNs= +k8s.io/api v0.28.15/go.mod h1:SJuOJTphYG05iJC9UKnUTNkY84Mvveu1P7adCgWqjCg= +k8s.io/apiextensions-apiserver v0.28.15 h1:NPlscxcvT4EutwP6rHTrlEvBvUj8HUBlXvFfUqLPWI4= +k8s.io/apiextensions-apiserver v0.28.15/go.mod h1:JguNDFsusOJwhHTzZAW2oeQ+15ekGy1ubZX9XWcZn+k= +k8s.io/apimachinery v0.28.15 h1:Jg15ZoCcAgnhSRKVS6tQyUZaX9c3i08bl2qAz8XE3bI= +k8s.io/apimachinery v0.28.15/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o= +k8s.io/apiserver v0.28.15 h1:i6cRWnWVtGL+1XCBBYilZ+dC/Yi8USpogVB4Sw2k+f4= +k8s.io/apiserver v0.28.15/go.mod h1:aEe/SUzUWmXZAYUxSUvOyo0xXtisjU+D7numTbu2Sz4= +k8s.io/client-go v0.28.15 h1:+g6Ub+i6tacV3tYJaoyK6bizpinPkamcEwsiKyHcIxc= +k8s.io/client-go v0.28.15/go.mod h1:/4upIpTbhWQVSXKDqTznjcAegj2Bx73mW/i0aennJrY= +k8s.io/cloud-provider v0.28.15 h1:QTh+CVo+Rso6xIge5Hni7gx6a7j7PvkmedgzRmnsYeE= +k8s.io/cloud-provider v0.28.15/go.mod h1:HdFjkzkUID7egSZO6n9sQHuPL7dLYHzqF6SnJXyOpeM= +k8s.io/component-base v0.28.15 h1:PRwUVO0iiKYjC9fYU2lCLlfjQffHcMv7D4ZhK9WWrKg= +k8s.io/component-base v0.28.15/go.mod h1:EtoV2f+v7rIrUlaEj1VkE5WuYGjMSBXPcXapu7tSsBs= +k8s.io/component-helpers v0.28.15 h1:VsQ7hcdqkHJ/9jR60JxK68GBUs9w0ChVXBamELwYWp4= +k8s.io/component-helpers v0.28.15/go.mod h1:o/OqOdzWU6rqqcLccRAjntG0i1/8Y5swrCQmBw0h+DU= +k8s.io/controller-manager v0.28.15 h1:T2/kyd106i3KcozKu9FwZcRNXpWjUJEI57+ikeuXJSs= +k8s.io/controller-manager v0.28.15/go.mod h1:AekcdDdcDTeL64TMiIusTh/VLCT1Q9CkhIR3IriR9oI= +k8s.io/csi-translation-lib v0.28.15 h1:DnSlqp9u7cB1Yw4JjH2KxZH7g1jeY59LKpkF9foOGHU= +k8s.io/csi-translation-lib v0.28.15/go.mod h1:BOLWQ0A1jLXKaJ2K8sS84XHna00I/IJX6aF1OS6AzRY= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.28.4 h1:PMgY/3CQTWP9eIKmNQiTgjLIZ0ns6O+voagzD2/4mSg= -k8s.io/kms v0.28.4/go.mod h1:HL4/lR/bhjAJPbqycKtfhWiKh1Sp21cpHOL8P4oo87w= +k8s.io/kms v0.28.15 h1:zvKR1nafYPbzdbq2tEl+dH1UlMnuov1pUMtfy73u224= +k8s.io/kms v0.28.15/go.mod h1:GsOkSBYseV1BsO1T4RW50SfyPmbEH0KUWEMRYQCNN0A= 
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kube-scheduler v0.28.4 h1:QdUvqNn4z9JbgLIwemj9zeGW5kJUtW+WDd8rev5HBDA= -k8s.io/kube-scheduler v0.28.4/go.mod h1:pHz0xQOjwDc+VpHhCE2KM1fER3ldm0vABnq0myBHsoI= -k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ= -k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c= -k8s.io/kubelet v0.28.4 h1:Ypxy1jaFlSXFXbg/yVtFOU2ZxErBVRJfLu8+t4s7Dtw= -k8s.io/kubelet v0.28.4/go.mod h1:w1wPI12liY/aeC70nqKYcNNkr6/nbyvdMB7P7wmww2o= -k8s.io/kubernetes v1.28.4 h1:aRNxs5jb8FVTtlnxeA4FSDBVKuFwA8Gw40/U2zReBYA= -k8s.io/kubernetes v1.28.4/go.mod h1:BTzDCKYAlu6LL9ITbfjwgwIrJ30hlTgbv0eXDoA/WoA= -k8s.io/mount-utils v0.28.4 h1:5GOZLm2dXi2fr+MKY8hS6kdV5reXrZBiK7848O5MVD0= -k8s.io/mount-utils v0.28.4/go.mod h1:ceMAZ+Nzlk8zOwN205YXXGJRGmf1o0/XIwsKnG44p0I= +k8s.io/kube-scheduler v0.28.15 h1:5ktYFiyjz74EAfDxh3lLKPc5e+KTYqCv2n4b4+h2dTE= +k8s.io/kube-scheduler v0.28.15/go.mod h1:e+VsRVp1SC3BW9sr1WlZPp9DhD2e0qOEXo8LDy3UmuU= +k8s.io/kubectl v0.28.15 h1:E0Oa/pe9qu6F7tCcUvDS6cjCxLVysvrodh7bs1ksMzg= +k8s.io/kubectl v0.28.15/go.mod h1:BS5gCoO2PrSturdcXxDQw1r6K6F0Gg6pb7ofAKsQbps= +k8s.io/kubelet v0.28.15 h1:AQo04wqu1teykYfHlCQBaUHwj+e8wLyRABrUaXHb8rk= +k8s.io/kubelet v0.28.15/go.mod h1:o5NqpzA4biIlssOMaHkPG/CsI6ku3WpVUuAQvpos2iQ= +k8s.io/kubernetes v1.28.15 h1:d91N2Znk+E8REIJZlrD6za2JtIwvEypa0DB1IoU4fJ0= +k8s.io/kubernetes v1.28.15/go.mod h1:chlmcCDBnOA/y+572cw8dO0Rci1wiA8bm5+zhPdFLCk= +k8s.io/mount-utils v0.28.15 h1:f0bT4qUY/N6J1/9bMjxPa1dGG8mljzE6f+lgIX/MN2c= +k8s.io/mount-utils v0.28.15/go.mod h1:ZxAFXgKzcAyi3VTd2pKFlZFswl9Q/cveJ5aptdjQOuc= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= diff --git a/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml b/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml index 488cad57c2..221bff2c32 100644 --- a/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml +++ b/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml @@ -42,7 +42,7 @@ spec: path: /etc/kubernetes containers: - name: oci-cloud-controller-manager - image: ghcr.io/oracle/cloud-provider-oci:v1.28.1 + image: ghcr.io/oracle/cloud-provider-oci:v1.28.2 command: ["/usr/local/bin/oci-cloud-controller-manager"] args: - --cloud-config=/etc/oci/cloud-provider.yaml diff --git a/manifests/container-storage-interface/oci-csi-controller-driver.yaml b/manifests/container-storage-interface/oci-csi-controller-driver.yaml index fbb1cb89cc..171249d648 100644 --- a/manifests/container-storage-interface/oci-csi-controller-driver.yaml +++ b/manifests/container-storage-interface/oci-csi-controller-driver.yaml @@ -96,7 +96,7 @@ spec: - --fss-csi-endpoint=unix://var/run/shared-tmpfs/csi-fss.sock command: - /usr/local/bin/oci-csi-controller-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.28.1 + image: ghcr.io/oracle/cloud-provider-oci:v1.28.2 imagePullPolicy: IfNotPresent volumeMounts: - name: config diff --git a/manifests/container-storage-interface/oci-csi-node-driver.yaml b/manifests/container-storage-interface/oci-csi-node-driver.yaml index 72790d0d6b..bd9529e7bc 100644 --- 
a/manifests/container-storage-interface/oci-csi-node-driver.yaml +++ b/manifests/container-storage-interface/oci-csi-node-driver.yaml @@ -117,7 +117,7 @@ spec: fieldPath: spec.nodeName - name: PATH value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/host/usr/bin:/host/sbin - image: ghcr.io/oracle/cloud-provider-oci:v1.28.1 + image: ghcr.io/oracle/cloud-provider-oci:v1.28.2 securityContext: privileged: true volumeMounts: diff --git a/manifests/container-storage-interface/oci-csi-node-rbac.yaml b/manifests/container-storage-interface/oci-csi-node-rbac.yaml index 36de8dcd7a..cc9ab5ef0f 100644 --- a/manifests/container-storage-interface/oci-csi-node-rbac.yaml +++ b/manifests/container-storage-interface/oci-csi-node-rbac.yaml @@ -56,6 +56,9 @@ rules: - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots/status" ] verbs: [ "update", "patch" ] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create"] --- kind: ClusterRoleBinding diff --git a/manifests/flexvolume-driver/oci-flexvolume-driver.yaml b/manifests/flexvolume-driver/oci-flexvolume-driver.yaml index b942a0220f..8fe61417a0 100644 --- a/manifests/flexvolume-driver/oci-flexvolume-driver.yaml +++ b/manifests/flexvolume-driver/oci-flexvolume-driver.yaml @@ -40,7 +40,7 @@ spec: secretName: oci-flexvolume-driver containers: - name: oci-flexvolume-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.28.1 + image: ghcr.io/oracle/cloud-provider-oci:v1.28.2 command: ["/usr/local/bin/install.py", "-c", "/tmp/config.yaml"] securityContext: privileged: true @@ -76,7 +76,7 @@ spec: type: DirectoryOrCreate containers: - name: oci-flexvolume-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.28.1 + image: ghcr.io/oracle/cloud-provider-oci:v1.28.2 command: ["/usr/local/bin/install.py"] securityContext: privileged: true diff --git a/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml b/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml index 1ed7a096cf..6a222e4734 100644 --- a/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml +++ b/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml @@ -35,7 +35,7 @@ spec: secretName: oci-volume-provisioner containers: - name: oci-volume-provisioner - image: ghcr.io/oracle/cloud-provider-oci:v1.28.1 + image: ghcr.io/oracle/cloud-provider-oci:v1.28.2 command: ["/usr/local/bin/oci-volume-provisioner"] env: - name: NODE_NAME diff --git a/manifests/volume-provisioner/oci-volume-provisioner.yaml b/manifests/volume-provisioner/oci-volume-provisioner.yaml index 4212148da5..bd09268ff9 100644 --- a/manifests/volume-provisioner/oci-volume-provisioner.yaml +++ b/manifests/volume-provisioner/oci-volume-provisioner.yaml @@ -35,7 +35,7 @@ spec: secretName: oci-volume-provisioner containers: - name: oci-volume-provisioner - image: ghcr.io/oracle/cloud-provider-oci:v1.28.1 + image: ghcr.io/oracle/cloud-provider-oci:v1.28.2 command: ["/usr/local/bin/oci-volume-provisioner"] env: - name: NODE_NAME diff --git a/pkg/cloudprovider/providers/oci/instances.go b/pkg/cloudprovider/providers/oci/instances.go index 7c5100426e..e3540e9b97 100644 --- a/pkg/cloudprovider/providers/oci/instances.go +++ b/pkg/cloudprovider/providers/oci/instances.go @@ -376,7 +376,7 @@ func (cp *CloudProvider) checkForAuthorizationError(ctx context.Context, instanc return false, err } // to eliminate AD specific issues, list all ADs and make AD specific requests - availabilityDomains, err := cp.client.Identity().ListAvailabilityDomains(ctx, compartmentId) + 
availabilityDomains, err := cp.client.Identity(nil).ListAvailabilityDomains(ctx, compartmentId) for _, availabilityDomain := range availabilityDomains { instances, err := cp.client.Compute().ListInstancesByCompartmentAndAD(ctx, compartmentId, *availabilityDomain.Name) // if we are getting errors for ListInstances the issue can be authorization or other issues diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index 50ff63e3bf..657928ea75 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -195,40 +195,6 @@ type client struct { logger *zap.SugaredLogger } -func setupBaseClient(log *zap.SugaredLogger, client *common.BaseClient, signer common.HTTPRequestSigner, interceptor common.RequestInterceptor, endpointOverrideEnvVar string) { - client.Signer = signer - client.Interceptor = interceptor - if endpointOverrideEnvVar != "" { - endpointOverride, ok := os.LookupEnv(endpointOverrideEnvVar) - if ok && endpointOverride != "" { - client.Host = endpointOverride - } - } - clusterIpFamily, ok := os.LookupEnv(ClusterIpFamilyEnv) - // currently as dual stack endpoints are going to be present in selected regions, only for IPv6 single stack cluster we will be using dual stack endpoints - if ok && strings.EqualFold(clusterIpFamily, Ipv6Stack) { - // TODO: Uncomment once method is available in Public SDK - //client.EnableDualStackEndpoints(true) - - region, ok := os.LookupEnv("OCI_RESOURCE_PRINCIPAL_REGION") - if !ok { - log.Errorf("unable to get OCI_RESOURCE_PRINCIPAL_REGION env var for region") - } - - authEndpoint, ok := os.LookupEnv("OCI_SDK_AUTH_CLIENT_REGION_URL") - if !ok { - authDualStackEndpoint := common.StringToRegion(region).EndpointForTemplate("", "ds.auth.{region}.oci.{secondLevelDomain}") - if err := os.Setenv("OCI_SDK_AUTH_CLIENT_REGION_URL", authDualStackEndpoint); err != nil { - log.Errorf("unable to set OCI_SDK_AUTH_CLIENT_REGION_URL env var for oci auth dual stack endpoint") - } else { - log.Infof("OCI_SDK_AUTH_CLIENT_REGION_URL env var set to: %s", authDualStackEndpoint) - } - } else { - log.Infof("OCI_SDK_AUTH_CLIENT_REGION_URL env var set to: %s", authEndpoint) - } - } -} - // New constructs an OCI API client. 
func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimiter *RateLimiter, targetTenancyID string) (Interface, error) { @@ -237,8 +203,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewComputeClientWithConfigurationProvider") } - setupBaseClient(logger, &compute.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &compute.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring load balancer client custom transport") @@ -249,8 +213,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewVirtualNetworkClientWithConfigurationProvider") } - setupBaseClient(logger, &network.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &network.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring load balancer client custom transport") @@ -261,8 +223,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewLoadBalancerClientWithConfigurationProvider") } - setupBaseClient(logger, &lb.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &lb.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring loadbalancer client custom transport") @@ -273,8 +233,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewNetworkLoadBalancerClientWithConfigurationProvider") } - setupBaseClient(logger, &nlb.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &nlb.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring networkloadbalancer client custom transport") @@ -285,8 +243,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewIdentityClientWithConfigurationProvider") } - setupBaseClient(logger, &identity.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &identity.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring identity service client custom transport") @@ -298,7 +254,7 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit // return nil, errors.Wrap(err, "NewCompartmentsClientWithConfigurationProvider") //} // - //setupBaseClient(logger, &compartment.BaseClient, nil, nil, "") + //setupBaseClient(logger, &compartment.BaseClient, signer, interceptor, "") // //err = configureCustomTransport(logger, &compartment.BaseClient) //if err != nil { @@ -310,8 +266,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewBlockstorageClientWithConfigurationProvider") } - setupBaseClient(logger, &bs.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &bs.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring block storage service client custom transport") @@ -322,8 +276,6 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit return nil, errors.Wrap(err, "NewFileStorageClientWithConfigurationProvider") } - setupBaseClient(logger, &fss.BaseClient, nil, nil, "") - err = configureCustomTransport(logger, &fss.BaseClient) if err != nil { return nil, errors.Wrap(err, "configuring file storage service client custom transport") @@ -394,7 +346,6 @@ func (c *client) LoadBalancer(logger *zap.SugaredLogger, lbType string, targetTe logger.Error("Failed to get new LB client with oke workload identity 
configuration provider! Error:" + err.Error()) return nil } - setupBaseClient(logger, &lb.BaseClient, nil, nil, "") err = configureCustomTransport(logger, &lb.BaseClient) if err != nil { @@ -414,7 +365,6 @@ func (c *client) LoadBalancer(logger *zap.SugaredLogger, lbType string, targetTe logger.Error("Failed to get new NLB client with oke workload identity configuration provider! Error:" + err.Error()) return nil } - setupBaseClient(logger, &nlb.BaseClient, nil, nil, "") err = configureCustomTransport(logger, &nlb.BaseClient) if err != nil { @@ -445,8 +395,6 @@ func (c *client) Networking(ociClientConfig *OCIClientConfig) NetworkingInterfac return nil } - setupBaseClient(c.logger, &network.BaseClient, nil, nil, "") - err = configureCustomTransport(c.logger, &network.BaseClient) if err != nil { c.logger.Error("Failed configure custom transport for Network Client %v", err) @@ -483,8 +431,6 @@ func (c *client) Identity(ociClientConfig *OCIClientConfig) IdentityInterface { return nil } - setupBaseClient(c.logger, &identity.BaseClient, nil, nil, "") - err = configureCustomTransport(c.logger, &identity.BaseClient) if err != nil { c.logger.Error("Failed configure custom transport for Identity Client %v", err) @@ -498,7 +444,7 @@ func (c *client) Identity(ociClientConfig *OCIClientConfig) IdentityInterface { // return nil //} // - //setupBaseClient(c.logger, &compartment.BaseClient, nil, nil, "") + //setupBaseClient(c.logger, &compartment.BaseClient, signer, interceptor, "") // //err = configureCustomTransport(c.logger, &compartment.BaseClient) //if err != nil { @@ -508,11 +454,11 @@ func (c *client) Identity(ociClientConfig *OCIClientConfig) IdentityInterface { return &client{ //compartment: &compartment, - identity: &identity, - requestMetadata: c.requestMetadata, - rateLimiter: c.rateLimiter, - subnetCache: cache.NewTTLStore(subnetCacheKeyFn, time.Duration(24)*time.Hour), - logger: c.logger, + identity: &identity, + requestMetadata: c.requestMetadata, + rateLimiter: c.rateLimiter, + subnetCache: cache.NewTTLStore(subnetCacheKeyFn, time.Duration(24)*time.Hour), + logger: c.logger, } } return c @@ -536,8 +482,6 @@ func (c *client) FSS(ociClientConfig *OCIClientConfig) FileStorageInterface { return nil } - setupBaseClient(c.logger, &fc.BaseClient, nil, nil, "") - err = configureCustomTransport(c.logger, &fc.BaseClient) if err != nil { c.logger.Errorf("Failed configure custom transport for FSS Client %v", err.Error()) diff --git a/pkg/oci/client/client_factory.go b/pkg/oci/client/client_factory.go index 678671466f..1d7410e7aa 100644 --- a/pkg/oci/client/client_factory.go +++ b/pkg/oci/client/client_factory.go @@ -19,7 +19,7 @@ import ( "go.uber.org/zap" ) -//GetClient returns the client for given Configuration +// GetClient returns the client for given Configuration func GetClient(logger *zap.SugaredLogger, cfg *config.Config) (Interface, error) { cp, err := config.NewConfigurationProvider(cfg) if err != nil { @@ -29,6 +29,6 @@ func GetClient(logger *zap.SugaredLogger, cfg *config.Config) (Interface, error) rateLimiter := NewRateLimiter(logger, cfg.RateLimiter) - c, err := New(logger, cp, &rateLimiter) + c, err := New(logger, cp, &rateLimiter, cfg.Auth.TenancyID) return c, err } diff --git a/pkg/oci/client/identity.go b/pkg/oci/client/identity.go index b71c6f9d58..d5f1a33c19 100644 --- a/pkg/oci/client/identity.go +++ b/pkg/oci/client/identity.go @@ -28,6 +28,24 @@ import ( // by the volume provisioner. 
type IdentityInterface interface { GetAvailabilityDomainByName(ctx context.Context, compartmentID, name string) (*identity.AvailabilityDomain, error) + ListAvailabilityDomains(ctx context.Context, compartmentID string) ([]identity.AvailabilityDomain, error) +} + +func (c *client) ListAvailabilityDomains(ctx context.Context, compartmentID string) ([]identity.AvailabilityDomain, error) { + if !c.rateLimiter.Reader.TryAccept() { + return nil, RateLimitError(false, "ListAvailabilityDomains") + } + + resp, err := c.identity.ListAvailabilityDomains(ctx, identity.ListAvailabilityDomainsRequest{ + CompartmentId: &compartmentID, + RequestMetadata: c.requestMetadata, + }) + incRequestCounter(err, listVerb, availabilityDomainResource) + if err != nil { + return nil, errors.WithStack(err) + } + + return resp.Items, nil } func (c *client) GetAvailabilityDomainByName(ctx context.Context, compartmentID, name string) (*identity.AvailabilityDomain, error) { diff --git a/pkg/volume/provisioner/core/provisioner.go b/pkg/volume/provisioner/core/provisioner.go index 3cc61cc119..e1bb67872e 100644 --- a/pkg/volume/provisioner/core/provisioner.go +++ b/pkg/volume/provisioner/core/provisioner.go @@ -124,7 +124,7 @@ func NewOCIProvisioner(logger *zap.SugaredLogger, kubeClient kubernetes.Interfac rateLimiter := client.NewRateLimiter(logger, cfg.RateLimiter) - client, err := client.New(logger, cp, &rateLimiter) + client, err := client.New(logger, cp, &rateLimiter, cfg.Auth.TenancyID) if err != nil { return nil, errors.Wrapf(err, "unable to construct OCI client") } diff --git a/test/e2e/cloud-provider-oci/fss_dynamic.go b/test/e2e/cloud-provider-oci/fss_dynamic.go index a0cbcbc75b..332daee119 100644 --- a/test/e2e/cloud-provider-oci/fss_dynamic.go +++ b/test/e2e/cloud-provider-oci/fss_dynamic.go @@ -27,7 +27,7 @@ import ( ) const ( - defaultExportOptionsJsonString = "[{\"source\":\"10.0.0.0/16\",\"requirePrivilegedSourcePort\":false,\"access\":\"READ_WRITE\",\"identitySquash\":\"NONE\",\"anonymousUid\":0,\"anonymousGid\":0},{\"source\":\"2603:c020:4015:2100::/56\",\"requirePrivilegedSourcePort\":false,\"access\":\"READ_WRITE\",\"identitySquash\":\"NONE\"},{\"source\":\"2603:c020:11:1500::/56\",\"requirePrivilegedSourcePort\":false,\"access\":\"READ_WRITE\",\"identitySquash\":\"NONE\"}]" + defaultExportOptionsJsonString = "[{\"source\":\"10.0.0.0/16\",\"requirePrivilegedSourcePort\":false,\"access\":\"READ_WRITE\",\"identitySquash\":\"NONE\",\"anonymousUid\":0,\"anonymousGid\":0}]" ) var _ = Describe("Dynamic FSS test in cluster compartment", func() { diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index e2b298d859..43557ab7e9 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1564,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. 
+ // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984fd9..3b9f06b962 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c6408d..ce2e8b40ee 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. 
- if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -924,7 +925,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } @@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 0000000000..61075bd16d --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. +// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. 
+ // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. + newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. + blocked := h.blocked[:0] + unblocked := false + for _, b := range h.blocked { + if !unblocked && b.f() { + unblocked = true + close(b.ch) + } else { + blocked = append(blocked, b) + } + } + h.blocked = blocked + + // Count goroutines blocked on condition variables. + condwait := 0 + for _, count := range h.condwait { + condwait += count + } + + if h.total > condwait+len(blocked) { + h.active <- struct{}{} + return true + } else { + h.inactive <- struct{}{} + return false + } +} + +// goRun starts a new goroutine. +func (h *testSyncHooks) goRun(f func()) { + h.lock() + h.total++ + h.unlock() + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + f() + }() +} + +// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. +// It waits until f returns true before proceeding. +// +// Example usage: +// +// h.blockUntil(func() bool { +// // Is the context done yet? +// select { +// case <-ctx.Done(): +// default: +// return false +// } +// return true +// }) +// // Wait for the context to become done. +// <-ctx.Done() +// +// The function f passed to blockUntil must be non-blocking and idempotent. +func (h *testSyncHooks) blockUntil(f func() bool) { + if f() { + return + } + ch := make(chan struct{}) + h.lock() + h.blocked = append(h.blocked, &testBlockedGoroutine{ + f: f, + ch: ch, + }) + h.unlock() + <-ch +} + +// broadcast is sync.Cond.Broadcast. +func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { + h.lock() + delete(h.condwait, cond) + h.unlock() + cond.Broadcast() +} + +// broadcast is sync.Cond.Wait. +func (h *testSyncHooks) condWait(cond *sync.Cond) { + h.lock() + h.condwait[cond]++ + h.unlock() +} + +// newTimer creates a new fake timer. +func (h *testSyncHooks) newTimer(d time.Duration) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + c: make(chan time.Time), + } + h.timers = append(h.timers, t) + return t +} + +// afterFunc creates a new fake AfterFunc timer. 
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + f: f, + } + h.timers = append(h.timers, t) + return t +} + +func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + t := h.afterFunc(d, cancel) + return ctx, func() { + t.Stop() + cancel() + } +} + +func (h *testSyncHooks) timeUntilEvent() time.Duration { + h.lock() + defer h.unlock() + var next time.Time + for _, t := range h.timers { + if next.IsZero() || t.when.Before(next) { + next = t.when + } + } + if d := next.Sub(h.now); d > 0 { + return d + } + return 0 +} + +// advance advances time and causes synthetic timers to fire. +func (h *testSyncHooks) advance(d time.Duration) { + h.lock() + defer h.unlock() + h.now = h.now.Add(d) + timers := h.timers[:0] + for _, t := range h.timers { + t := t // remove after go.mod depends on go1.22 + t.mu.Lock() + switch { + case t.when.After(h.now): + timers = append(timers, t) + case t.when.IsZero(): + // stopped timer + default: + t.when = time.Time{} + if t.c != nil { + close(t.c) + } + if t.f != nil { + h.total++ + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + t.f() + }() + } + } + t.mu.Unlock() + } + h.timers = timers +} + +// A timer wraps a time.Timer, or a synthetic equivalent in tests. +// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. +type timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// timeTimer implements timer using real time. +type timeTimer struct { + t *time.Timer + c chan time.Time +} + +// newTimeTimer creates a new timer using real time. +func newTimeTimer(d time.Duration) timer { + ch := make(chan time.Time) + t := time.AfterFunc(d, func() { + close(ch) + }) + return &timeTimer{t, ch} +} + +// newTimeAfterFunc creates an AfterFunc timer using real time. +func newTimeAfterFunc(d time.Duration, f func()) timer { + return &timeTimer{ + t: time.AfterFunc(d, f), + } +} + +func (t timeTimer) C() <-chan time.Time { return t.c } +func (t timeTimer) Stop() bool { return t.t.Stop() } +func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } + +// fakeTimer implements timer using fake time. +type fakeTimer struct { + hooks *testSyncHooks + + mu sync.Mutex + when time.Time // when the timer will fire + c chan time.Time // closed when the timer fires; mutually exclusive with f + f func() // called when the timer fires; mutually exclusive with c +} + +func (t *fakeTimer) C() <-chan time.Time { return t.c } + +func (t *fakeTimer) Stop() bool { + t.mu.Lock() + defer t.mu.Unlock() + stopped := t.when.IsZero() + t.when = time.Time{} + return stopped +} + +func (t *fakeTimer) Reset(d time.Duration) bool { + if t.c != nil || t.f == nil { + panic("fakeTimer only supports Reset on AfterFunc timers") + } + t.mu.Lock() + defer t.mu.Unlock() + t.hooks.lock() + defer t.hooks.unlock() + active := !t.when.IsZero() + t.when = t.hooks.now.Add(d) + if !active { + t.hooks.timers = append(t.hooks.timers, t) + } + return active +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86c6..ce375c8c75 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. 
StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -178,6 +184,8 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +310,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -344,6 +352,60 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + + syncHooks *testSyncHooks // can be nil +} + +// Hook points used for testing. +// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +// goRun starts a new goroutine. +func (cc *ClientConn) goRun(f func()) { + if cc.syncHooks != nil { + cc.syncHooks.goRun(f) + return + } + go f() +} + +// condBroadcast is cc.cond.Broadcast. +func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. 
- cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) 
(*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy 
of the logic in net/http. Unify somewhere? if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } } + return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
- cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2911,9 +3065,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2955,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go index 69a4ac7eef..1e64157f3e 100644 --- a/vendor/golang.org/x/net/websocket/client.go +++ b/vendor/golang.org/x/net/websocket/client.go @@ -6,10 +6,12 @@ package websocket import ( "bufio" + "context" "io" "net" "net/http" "net/url" + "time" ) // DialError is an error that occurs while dialling a websocket server. 
@@ -79,28 +81,59 @@ func parseAuthority(location *url.URL) string { // DialConfig opens a new client connection to a WebSocket with a config. func DialConfig(config *Config) (ws *Conn, err error) { - var client net.Conn + return config.DialContext(context.Background()) +} + +// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation. +func (config *Config) DialContext(ctx context.Context) (*Conn, error) { if config.Location == nil { return nil, &DialError{config, ErrBadWebSocketLocation} } if config.Origin == nil { return nil, &DialError{config, ErrBadWebSocketOrigin} } + dialer := config.Dialer if dialer == nil { dialer = &net.Dialer{} } - client, err = dialWithDialer(dialer, config) - if err != nil { - goto Error - } - ws, err = NewClient(config, client) + + client, err := dialWithDialer(ctx, dialer, config) if err != nil { - client.Close() - goto Error + return nil, &DialError{config, err} } - return -Error: - return nil, &DialError{config, err} + // Cleanup the connection if we fail to create the websocket successfully + success := false + defer func() { + if !success { + _ = client.Close() + } + }() + + var ws *Conn + var wsErr error + doneConnecting := make(chan struct{}) + go func() { + defer close(doneConnecting) + ws, err = NewClient(config, client) + if err != nil { + wsErr = &DialError{config, err} + } + }() + + // The websocket.NewClient() function can block indefinitely, make sure that we + // respect the deadlines specified by the context. + select { + case <-ctx.Done(): + // Force the pending operations to fail, terminating the pending connection attempt + _ = client.SetDeadline(time.Now()) + <-doneConnecting // Wait for the goroutine that tries to establish the connection to finish + return nil, &DialError{config, ctx.Err()} + case <-doneConnecting: + if wsErr == nil { + success = true // Disarm the deferred connection cleanup + } + return ws, wsErr + } } diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go index 2dab943a48..8a2d83c473 100644 --- a/vendor/golang.org/x/net/websocket/dial.go +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -5,18 +5,23 @@ package websocket import ( + "context" "crypto/tls" "net" ) -func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { +func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) { switch config.Location.Scheme { case "ws": - conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) case "wss": - conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + tlsDialer := &tls.Dialer{ + NetDialer: dialer, + Config: config.TlsConfig, + } + conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) default: err = ErrBadScheme } diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdfec..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. 
-// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) 
- fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 5627d70e39..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index d60ab1b419..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 18a002f82a..333676b7cf 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -8,42 +8,46 @@ package packagesdriver import ( "context" "fmt" - "go/types" "strings" "golang.org/x/tools/internal/gocommand" ) -var debug = false - -func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { inv.Verb = "list" inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string if rawErr != nil { - if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? inv.Verb = "env" inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { - return nil, enverr + return "", "", enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr } else { - return nil, friendlyErr + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. 
+ return "", "", rawErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } - return types.SizesFor(compiler, goarch), nil + return compiler, goarch, nil } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index da4ab89fe6..b2a0b7c6a6 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -5,12 +5,32 @@ /* Package packages loads Go packages for inspection and analysis. -The Load function takes as input a list of patterns and return a list of Package -structs describing individual packages matched by those patterns. -The LoadMode controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool, -but all patterns with the prefix "query=", where query is a +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. + +Load may be used in Go projects that use alternative build systems, by +installing an appropriate "driver" program for the build system and +specifying its location in the GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +they identify. +(See driverRequest and driverResponse types for the JSON +schema used by the protocol. +Though the protocol is supported, these types are currently unexported; +see #64608 for a proposal to publish them.) + +Regardless of driver, all patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -35,7 +55,7 @@ The Package struct provides basic information about the package, including - Imports, a map from source import strings to the Packages they name; - Types, the type information for the package's exported symbols; - Syntax, the parsed syntax trees for the package's source code; and - - TypeInfo, the result of a complete type-check of the package syntax trees. + - TypesInfo, the result of a complete type-check of the package syntax trees. (See the documentation for type Package for the complete list of fields and more detailed descriptions.) @@ -64,7 +84,7 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to the loader, so that the loader can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. 
*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7242a0a7d2..7db1d1293a 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -12,8 +12,8 @@ import ( "bytes" "encoding/json" "fmt" - exec "golang.org/x/sys/execabs" "os" + "os/exec" "strings" ) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 58230038a7..cd375fbc3c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,10 +9,9 @@ import ( "context" "encoding/json" "fmt" - "go/types" - "io/ioutil" "log" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -22,7 +21,6 @@ import ( "sync" "unicode" - exec "golang.org/x/sys/execabs" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" @@ -153,10 +151,10 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - // types.SizesFor always returns nil or a *types.StdSizes. - response.dr.Sizes, _ = sizes.(*types.StdSizes) + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + sizeserr = err + response.dr.Compiler = compiler + response.dr.Arch = arch sizeswg.Done() }() } @@ -210,62 +208,6 @@ extractQueries: } } - // Only use go/packages' overlay processing if we're using a Go version - // below 1.16. Otherwise, go list handles it. - if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { - modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return nil, err - } - - var containsCandidates []string - if len(containFiles) > 0 { - containsCandidates = append(containsCandidates, modifiedPkgs...) - containsCandidates = append(containsCandidates, needPkgs...) - } - if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { - return nil, err - } - // Check candidate packages for containFiles. - if len(containFiles) > 0 { - for _, id := range containsCandidates { - pkg, ok := response.seenPackages[id] - if !ok { - response.addPackage(&Package{ - ID: id, - Errors: []Error{{ - Kind: ListError, - Msg: fmt.Sprintf("package %s expected but not seen", id), - }}, - }) - continue - } - for _, f := range containFiles { - for _, g := range pkg.GoFiles { - if sameFile(f, g) { - response.addRoot(id) - } - } - } - } - } - // Add root for any package that matches a pattern. This applies only to - // packages that are modified by overlays, since they are not added as - // roots automatically. - for _, pattern := range restPatterns { - match := matchPattern(pattern) - for _, pkgID := range modifiedPkgs { - pkg, ok := response.seenPackages[pkgID] - if !ok { - continue - } - if match(pkg.PkgPath) { - response.addRoot(pkg.ID) - } - } - } - } - sizeswg.Wait() if sizeserr != nil { return nil, sizeserr @@ -273,24 +215,6 @@ extractQueries: return response.dr, nil } -func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { - if len(pkgs) == 0 { - return nil - } - dr, err := state.createDriverResponse(pkgs...) 
- if err != nil { - return err - } - for _, pkg := range dr.Packages { - response.addPackage(pkg) - } - _, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return err - } - return state.addNeededOverlayPackages(response, needPkgs) -} - func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. @@ -1110,7 +1034,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err if len(state.cfg.Overlay) == 0 { return "", func() {}, nil } - dir, err := ioutil.TempDir("", "gopackages-*") + dir, err := os.MkdirTemp("", "gopackages-*") if err != nil { return "", nil, err } @@ -1129,7 +1053,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err // Create a unique filename for the overlaid files, to avoid // creating nested directories. noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) if err != nil { return "", func() {}, err } @@ -1147,7 +1071,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err } // Write out the overlay file that contains the filepath mappings. filename = filepath.Join(dir, "overlay.json") - if err := ioutil.WriteFile(filename, b, 0665); err != nil { + if err := os.WriteFile(filename, b, 0665); err != nil { return "", func() {}, err } return filename, cleanup, nil diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 9576b472f9..d823c474ad 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -6,314 +6,11 @@ package packages import ( "encoding/json" - "fmt" - "go/parser" - "go/token" - "os" "path/filepath" - "regexp" - "sort" - "strconv" - "strings" "golang.org/x/tools/internal/gocommand" ) -// processGolistOverlay provides rudimentary support for adding -// files that don't exist on disk to an overlay. The results can be -// sometimes incorrect. -// TODO(matloob): Handle unsupported cases, including the following: -// - determining the correct package to add given a new import path -func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { - havePkgs := make(map[string]string) // importPath -> non-test package ID - needPkgsSet := make(map[string]bool) - modifiedPkgsSet := make(map[string]bool) - - pkgOfDir := make(map[string][]*Package) - for _, pkg := range response.dr.Packages { - // This is an approximation of import path to id. This can be - // wrong for tests, vendored packages, and a number of other cases. - havePkgs[pkg.PkgPath] = pkg.ID - dir, err := commonDir(pkg.GoFiles) - if err != nil { - return nil, nil, err - } - if dir != "" { - pkgOfDir[dir] = append(pkgOfDir[dir], pkg) - } - } - - // If no new imports are added, it is safe to avoid loading any needPkgs. - // Otherwise, it's hard to tell which package is actually being loaded - // (due to vendoring) and whether any modified package will show up - // in the transitive set of dependencies (because new imports are added, - // potentially modifying the transitive set of dependencies). 
- var overlayAddsImports bool - - // If both a package and its test package are created by the overlay, we - // need the real package first. Process all non-test files before test - // files, and make the whole process deterministic while we're at it. - var overlayFiles []string - for opath := range state.cfg.Overlay { - overlayFiles = append(overlayFiles, opath) - } - sort.Slice(overlayFiles, func(i, j int) bool { - iTest := strings.HasSuffix(overlayFiles[i], "_test.go") - jTest := strings.HasSuffix(overlayFiles[j], "_test.go") - if iTest != jTest { - return !iTest // non-tests are before tests. - } - return overlayFiles[i] < overlayFiles[j] - }) - for _, opath := range overlayFiles { - contents := state.cfg.Overlay[opath] - base := filepath.Base(opath) - dir := filepath.Dir(opath) - var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant - var testVariantOf *Package // if opath is a test file, this is the package it is testing - var fileExists bool - isTestFile := strings.HasSuffix(opath, "_test.go") - pkgName, ok := extractPackageName(opath, contents) - if !ok { - // Don't bother adding a file that doesn't even have a parsable package statement - // to the overlay. - continue - } - // If all the overlay files belong to a different package, change the - // package name to that package. - maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) - nextPackage: - for _, p := range response.dr.Packages { - if pkgName != p.Name && p.ID != "command-line-arguments" { - continue - } - for _, f := range p.GoFiles { - if !sameFile(filepath.Dir(f), dir) { - continue - } - // Make sure to capture information on the package's test variant, if needed. - if isTestFile && !hasTestFiles(p) { - // TODO(matloob): Are there packages other than the 'production' variant - // of a package that this can match? This shouldn't match the test main package - // because the file is generated in another directory. - testVariantOf = p - continue nextPackage - } else if !isTestFile && hasTestFiles(p) { - // We're examining a test variant, but the overlaid file is - // a non-test file. Because the overlay implementation - // (currently) only adds a file to one package, skip this - // package, so that we can add the file to the production - // variant of the package. (https://golang.org/issue/36857 - // tracks handling overlays on both the production and test - // variant of a package). - continue nextPackage - } - if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // We have already seen the production version of the - // for which p is a test variant. - if hasTestFiles(p) { - testVariantOf = pkg - } - } - pkg = p - if filepath.Base(f) == base { - fileExists = true - } - } - } - // The overlay could have included an entirely new package or an - // ad-hoc package. An ad-hoc package is one that we have manually - // constructed from inadequate `go list` results for a file= query. - // It will have the ID command-line-arguments. - if pkg == nil || pkg.ID == "command-line-arguments" { - // Try to find the module or gopath dir the file is contained in. - // Then for modules, add the module opath to the beginning. 
- pkgPath, ok, err := state.getPkgPath(dir) - if err != nil { - return nil, nil, err - } - if !ok { - break - } - var forTest string // only set for x tests - isXTest := strings.HasSuffix(pkgName, "_test") - if isXTest { - forTest = pkgPath - pkgPath += "_test" - } - id := pkgPath - if isTestFile { - if isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) - } else { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - } - if pkg != nil { - // TODO(rstambler): We should change the package's path and ID - // here. The only issue is that this messes with the roots. - } else { - // Try to reclaim a package with the same ID, if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break - } - } - // Otherwise, create a new package. - if pkg == nil { - pkg = &Package{ - PkgPath: pkgPath, - ID: id, - Name: pkgName, - Imports: make(map[string]*Package), - } - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} - } - } - if isXTest { - pkg.forTest = forTest - } - } - } - } - if !fileExists { - pkg.GoFiles = append(pkg.GoFiles, opath) - // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior - // if the file will be ignored due to its build tags. - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) - modifiedPkgsSet[pkg.ID] = true - } - imports, err := extractImports(opath, contents) - if err != nil { - // Let the parser or type checker report errors later. - continue - } - for _, imp := range imports { - // TODO(rstambler): If the package is an x test and the import has - // a test variant, make sure to replace it. - if _, found := pkg.Imports[imp]; found { - continue - } - overlayAddsImports = true - id, ok := havePkgs[imp] - if !ok { - var err error - id, err = state.resolveImport(dir, imp) - if err != nil { - return nil, nil, err - } - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as well. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} - } - } - } - - // toPkgPath guesses the package path given the id. - toPkgPath := func(sourceDir, id string) (string, error) { - if i := strings.IndexByte(id, ' '); i >= 0 { - return state.resolveImport(sourceDir, id[:i]) - } - return state.resolveImport(sourceDir, id) - } - - // Now that new packages have been created, do another pass to determine - // the new set of missing packages. 
- for _, pkg := range response.dr.Packages { - for _, imp := range pkg.Imports { - if len(pkg.GoFiles) == 0 { - return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) - } - pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) - if err != nil { - return nil, nil, err - } - if _, ok := havePkgs[pkgPath]; !ok { - needPkgsSet[pkgPath] = true - } - } - } - - if overlayAddsImports { - needPkgs = make([]string, 0, len(needPkgsSet)) - for pkg := range needPkgsSet { - needPkgs = append(needPkgs, pkg) - } - } - modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) - for pkg := range modifiedPkgsSet { - modifiedPkgs = append(modifiedPkgs, pkg) - } - return modifiedPkgs, needPkgs, err -} - -// resolveImport finds the ID of a package given its import path. -// In particular, it will find the right vendored copy when in GOPATH mode. -func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { - env, err := state.getEnv() - if err != nil { - return "", err - } - if env["GOMOD"] != "" { - return importPath, nil - } - - searchDir := sourceDir - for { - vendorDir := filepath.Join(searchDir, "vendor") - exists, ok := state.vendorDirs[vendorDir] - if !ok { - info, err := os.Stat(vendorDir) - exists = err == nil && info.IsDir() - state.vendorDirs[vendorDir] = exists - } - - if exists { - vendoredPath := filepath.Join(vendorDir, importPath) - if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { - // We should probably check for .go files here, but shame on anyone who fools us. - path, ok, err := state.getPkgPath(vendoredPath) - if err != nil { - return "", err - } - if ok { - return path, nil - } - } - } - - // We know we've hit the top of the filesystem when we Dir / and get /, - // or C:\ and get C:\, etc. - next := filepath.Dir(searchDir) - if next == searchDir { - break - } - searchDir = next - } - return importPath, nil -} - -func hasTestFiles(p *Package) bool { - for _, f := range p.GoFiles { - if strings.HasSuffix(f, "_test.go") { - return true - } - } - return false -} - // determineRootDirs returns a mapping from absolute directories that could // contain code to their corresponding import path prefixes. func (state *golistState) determineRootDirs() (map[string]string, error) { @@ -384,192 +81,3 @@ func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { } return m, nil } - -func extractImports(filename string, contents []byte) ([]string, error) { - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? - if err != nil { - return nil, err - } - var res []string - for _, imp := range f.Imports { - quotedPath := imp.Path.Value - path, err := strconv.Unquote(quotedPath) - if err != nil { - return nil, err - } - res = append(res, path) - } - return res, nil -} - -// reclaimPackage attempts to reuse a package that failed to load in an overlay. -// -// If the package has errors and has no Name, GoFiles, or Imports, -// then it's possible that it doesn't yet exist on disk. -func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. 
- if pkg.ID != id { - return false - } - if len(pkg.Errors) != 1 { - return false - } - if pkg.Name != "" || pkg.ExportFile != "" { - return false - } - if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { - return false - } - if len(pkg.Imports) > 0 { - return false - } - pkgName, ok := extractPackageName(filename, contents) - if !ok { - return false - } - pkg.Name = pkgName - pkg.Errors = nil - return true -} - -func extractPackageName(filename string, contents []byte) (string, bool) { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? - if err != nil { - return "", false - } - return f.Name.Name, true -} - -// commonDir returns the directory that all files are in, "" if files is empty, -// or an error if they aren't in the same directory. -func commonDir(files []string) (string, error) { - seen := make(map[string]bool) - for _, f := range files { - seen[filepath.Dir(f)] = true - } - if len(seen) > 1 { - return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) - } - for k := range seen { - // seen has only one element; return it. - return k, nil - } - return "", nil // no files -} - -// It is possible that the files in the disk directory dir have a different package -// name from newName, which is deduced from the overlays. If they all have a different -// package name, and they all have the same package name, then that name becomes -// the package name. -// It returns true if it changes the package name, false otherwise. -func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { - names := make(map[string]int) - for _, p := range pkgsOfDir { - names[p.Name]++ - } - if len(names) != 1 { - // some files are in different packages - return - } - var oldName string - for k := range names { - oldName = k - } - if newName == oldName { - return - } - // We might have a case where all of the package names in the directory are - // the same, but the overlay file is for an x test, which belongs to its - // own package. If the x test does not yet exist on disk, we may not yet - // have its package name on disk, but we should not rename the packages. - // - // We use a heuristic to determine if this file belongs to an x test: - // The test file should have a package name whose package name has a _test - // suffix or looks like "newName_test". - maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") - if isTestFile && maybeXTest { - return - } - for _, p := range pkgsOfDir { - p.Name = newName - } -} - -// This function is copy-pasted from -// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. -// It should be deleted when we remove support for overlays from go/packages. -// -// NOTE: This does not handle any ./... or ./ style queries, as this function -// doesn't know the working directory. -// -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. 
-// Second, any slash-separated pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. - // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. - - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index da1a27eea6..81e9e6a727 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,7 +16,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -28,8 +27,8 @@ import ( "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -220,8 +219,10 @@ type driverResponse struct { // lists of multiple drivers, go/packages will fall back to the next driver. NotHandled bool - // Sizes, if not nil, is the types.Sizes to use when type checking. - Sizes *types.StdSizes + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string // Roots is the set of package IDs that make up the root packages. 
// We have to encode this separately because when we encode a single package @@ -257,31 +258,52 @@ type driverResponse struct { // proceeding with further analysis. The PrintErrors function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { - l := newLoader(cfg) - response, err := defaultDriver(&l.Config, patterns...) + ld := newLoader(cfg) + response, external, err := defaultDriver(&ld.Config, patterns...) if err != nil { return nil, err } - l.sizes = response.Sizes - return l.refine(response) + + ld.sizes = types.SizesFor(response.Compiler, response.Arch) + if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { + // Type size information is needed but unavailable. + if external { + // An external driver may fail to populate the Compiler/GOARCH fields, + // especially since they are relatively new (see #63700). + // Provide a sensible fallback in this case. + ld.sizes = types.SizesFor("gc", runtime.GOARCH) + if ld.sizes == nil { // gccgo-only arch + ld.sizes = types.SizesFor("gc", "amd64") + } + } else { + // Go list should never fail to deliver accurate size information. + // Reject the whole Load since the error is the same for every package. + return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", + response.Compiler, response.Arch) + } + } + + return ld.refine(response) } // defaultDriver is a driver that implements go/packages' fallback behavior. // It will try to request to an external driver, if one exists. If there's // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - driver := findExternalDriver(cfg) - if driver == nil { - driver = goListDriver - } - response, err := driver(cfg, patterns...) - if err != nil { - return response, err - } else if response.NotHandled { - return goListDriver(cfg, patterns...) +// The boolean result indicates that an external driver handled the request. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { + if driver := findExternalDriver(cfg); driver != nil { + response, err := driver(cfg, patterns...) + if err != nil { + return nil, false, err + } else if !response.NotHandled { + return response, true, nil + } + // (fall through) } - return response, nil + + response, err := goListDriver(cfg, patterns...) + return response, false, err } // A Package describes a loaded Go package. @@ -410,12 +432,6 @@ func init() { packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { - return config.(*Config).gocmdRunner - } - packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { - config.(*Config).gocmdRunner = runner - } packagesinternal.SetModFile = func(config interface{}, value string) { config.(*Config).modFile = value } @@ -552,7 +568,7 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes + sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue parseCacheMu sync.Mutex exportMu sync.Mutex // enforces mutual exclusion of exportdata operations @@ -677,39 +693,38 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } } - // Materialize the import graph. 
- - const ( - white = 0 // new - grey = 1 // in progress - black = 2 // complete - ) - - // visit traverses the import graph, depth-first, - // and materializes the graph as Packages.Imports. - // - // Valid imports are saved in the Packages.Import map. - // Invalid imports (cycles and missing nodes) are saved in the importErrors map. - // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. - // - // visit returns whether the package needs src or has a transitive - // dependency on a package that does. These are the only packages - // for which we load source code. - var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - var srcPkgs []*loaderPackage - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: - panic("internal error: grey node") - } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - // If NeedImports isn't set, the imports fields will all be zeroed out. - if ld.Mode&NeedImports != 0 { + if ld.Mode&NeedImports != 0 { + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, + // the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports lpkg.Imports = make(map[string]*Package, len(stubs)) for importPath, ipkg := range stubs { var importErr error @@ -733,40 +748,39 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } lpkg.Imports[importPath] = imp.Package } - } - if lpkg.needsrc { - srcPkgs = append(srcPkgs, lpkg) - } - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - return lpkg.needsrc - } + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } + } - if ld.Mode&NeedImports == 0 { - // We do this to drop the stub import packages that we are not even going to try to resolve. - for _, lpkg := range initial { - lpkg.Imports = nil + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc } - } else { + // For each initial package, create its import DAG. 
for _, lpkg := range initial { visit(lpkg) } - } - if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true - } + + } else { + // !NeedImports: drop the stub (ID-only) import packages + // that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil } } + // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { @@ -1000,10 +1014,11 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(lpkg.TypesInfo) + versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1041,7 +1056,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, Error: appendError, - Sizes: ld.sizes, + Sizes: ld.sizes, // may be nil } if lpkg.Module != nil && lpkg.Module.GoVersion != "" { typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) @@ -1125,7 +1140,7 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var err error if src == nil { ioLimit <- true // wait - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) <-ioLimit // signal } if err != nil { diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index c725d839ba..11d5c8c3ad 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -26,10 +26,8 @@ package objectpath import ( "fmt" "go/types" - "sort" "strconv" "strings" - _ "unsafe" "golang.org/x/tools/internal/typeparams" ) @@ -122,17 +120,7 @@ func For(obj types.Object) (Path, error) { // An Encoder amortizes the cost of encoding the paths of multiple objects. // The zero value of an Encoder is ready to use. type Encoder struct { - scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() - skipMethodSorting bool -} - -// Exposed to gopls via golang.org/x/tools/internal/typesinternal -// TODO(golang/go#61443): eliminate this parameter one way or the other. -// -//go:linkname skipMethodSorting -func skipMethodSorting(enc *Encoder) { - enc.skipMethodSorting = true + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects } // For returns the path to an object relative to its package, @@ -235,7 +223,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + if _, ok := obj.Type().(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. 
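With the loader changes above, the import graph is only materialized when NeedImports is requested, and loadPackage now fills TypesInfo.Instances and file-version data. A brief usage sketch of the public go/packages API showing the relevant Mode bits; the "fmt" pattern is purely illustrative:

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// NeedImports materializes pkg.Imports; NeedTypes/NeedTypesInfo expose
		// pkg.Types and pkg.TypesInfo (including Instances); NeedTypesSizes
		// sets pkg.TypesSizes.
		Mode: packages.NeedName | packages.NeedImports |
			packages.NeedTypes | packages.NeedTypesInfo | packages.NeedTypesSizes,
	}
	pkgs, err := packages.Load(cfg, "fmt") // example pattern only
	if err != nil {
		panic(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.PkgPath, len(pkg.Imports), pkg.TypesSizes != nil)
	}
}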
return "", fmt.Errorf("no path for %v", obj) @@ -295,7 +283,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } } else { if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { // generic named type return Path(r), nil } @@ -324,31 +312,18 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Inspect declared methods of defined types. if T, ok := o.Type().(*types.Named); ok { path = append(path, opType) - if !enc.skipMethodSorting { - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. - for i, m := range enc.namedMethods(T) { - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method } - } else { - // This branch must match the logic in the branch above, using go/types - // APIs without sorting. - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil } } } @@ -444,22 +419,13 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { path = append(path, name...) path = append(path, opType) - if !enc.skipMethodSorting { - for i, m := range enc.namedMethods(named) { - if m == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } - } - } else { - // This branch must match the logic of the branch above, using go/types - // APIs without sorting. - for i := 0; i < named.NumMethods(); i++ { - m := named.Method(i) - if m == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. 
+ for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true } } @@ -496,7 +462,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -539,7 +505,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } } return nil - case *typeparams.TypeParam: + case *types.TypeParam: name := T.Obj() if name == obj { return append(path, opObj) @@ -559,7 +525,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, opTypeParam, i) @@ -572,17 +538,11 @@ func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte // Object returns the object denoted by path p within the package pkg. func Object(pkg *types.Package, p Path) (types.Object, error) { - return object(pkg, p, false) -} - -// Note: the skipMethodSorting parameter must match the value of -// Encoder.skipMethodSorting used during encoding. -func object(pkg *types.Package, p Path, skipMethodSorting bool) (types.Object, error) { - if p == "" { + pathstr := string(p) + if pathstr == "" { return nil, fmt.Errorf("empty path") } - pathstr := string(p) var pkgobj, suffix string if dot := strings.IndexByte(pathstr, opType); dot < 0 { pkgobj = pathstr @@ -602,7 +562,7 @@ func object(pkg *types.Package, p Path, skipMethodSorting bool) (types.Object, e } // abstraction of *types.{Named,Signature} type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList + TypeParams() *types.TypeParamList } // abstraction of *types.{Named,TypeParam} type hasObj interface { @@ -704,7 +664,7 @@ func object(pkg *types.Package, p Path, skipMethodSorting bool) (types.Object, e t = tparams.At(index) case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) + tparam, ok := t.(*types.TypeParam) if !ok { return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) } @@ -744,12 +704,7 @@ func object(pkg *types.Package, p Path, skipMethodSorting bool) (types.Object, e if index >= t.NumMethods() { return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) } - if skipMethodSorting { - obj = t.Method(index) - } else { - methods := namedMethods(t) // (unmemoized) - obj = methods[index] // Id-ordered - } + obj = t.Method(index) default: return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) @@ -776,33 +731,6 @@ func object(pkg *types.Package, p Path, skipMethodSorting bool) (types.Object, e return obj, nil // success } -// namedMethods returns the methods of a Named type in ascending Id order. 
-func namedMethods(named *types.Named) []*types.Func { - methods := make([]*types.Func, named.NumMethods()) - for i := range methods { - methods[i] = named.Method(i) - } - sort.Slice(methods, func(i, j int) bool { - return methods[i].Id() < methods[j].Id() - }) - return methods -} - -// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. -func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { - m := enc.namedMethodsMemo - if m == nil { - m = make(map[*types.Named][]*types.Func) - enc.namedMethodsMemo = m - } - methods, ok := m[named] - if !ok { - methods = namedMethods(named) // allocates and sorts - m[named] = methods - } - return methods -} - // scopeObjects is a memoization of scope objects. // Callers must not modify the result. func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 0000000000..c0e8e731c9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. +func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index b1223713b9..2d078ccb19 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -29,7 +29,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -221,7 +220,7 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func switch hdr { case "$$B\n": var data []byte - data, err = ioutil.ReadAll(buf) + data, err = io.ReadAll(buf) if err != nil { break } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 6103dd7102..2ee8c70164 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -24,7 +24,6 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/tokeninternal" - "golang.org/x/tools/internal/typeparams" ) // IExportShallow encodes "shallow" export data for the specified package. @@ -481,7 +480,7 @@ func (p *iexporter) doDecl(obj types.Object) { } // Function. - if typeparams.ForSignature(sig).Len() == 0 { + if sig.TypeParams().Len() == 0 { w.tag('F') } else { w.tag('G') @@ -494,7 +493,7 @@ func (p *iexporter) doDecl(obj types.Object) { // // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. 
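The objectpath rewrite above encodes method indices in go/types declaration order, which export data preserves, instead of the previous Id-sorted order. A self-contained round-trip sketch using the package's exported For and Object functions; the tiny source string and package name are made up for illustration:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p
type T struct{}
func (T) M() {}
func (T) A() {}`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	// go/types reports methods in declaration order, so M is method 0 even
	// though A sorts first alphabetically.
	named := pkg.Scope().Lookup("T").Type().(*types.Named)
	m := named.Method(0)
	path, err := objectpath.For(m)
	if err != nil {
		panic(err)
	}
	back, err := objectpath.Object(pkg, path)
	fmt.Println(m.Name(), path, back == m, err) // M <path> true <nil>
}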
- if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + if tparams := sig.TypeParams(); tparams.Len() > 0 { w.tparamList(obj.Name(), tparams, obj.Pkg()) } w.signature(sig) @@ -507,14 +506,14 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*typeparams.TypeParam); ok { + if tparam, ok := t.(*types.TypeParam); ok { w.tag('P') w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = typeparams.IsImplicit(iface) + implicit = iface.IsImplicit() } w.bool(implicit) } @@ -535,17 +534,17 @@ func (p *iexporter) doDecl(obj types.Object) { panic(internalErrorf("%s is not a defined type", t)) } - if typeparams.ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { w.tag('T') } else { w.tag('U') } w.pos(obj.Pos()) - if typeparams.ForNamed(named).Len() > 0 { + if named.TypeParams().Len() > 0 { // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) } underlying := obj.Type().Underlying() @@ -565,7 +564,7 @@ func (p *iexporter) doDecl(obj types.Object) { // Receiver type parameters are type arguments of the receiver type, so // their name must be qualified before exporting recv. - if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() for i := 0; i < rparams.Len(); i++ { rparam := rparams.At(i) @@ -740,19 +739,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } switch t := t.(type) { case *types.Named: - if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) // TODO(rfindley): investigate if this position is correct, and if it // matters. w.pos(t.Obj().Pos()) w.typeList(targs, pkg) - w.typ(typeparams.NamedTypeOrigin(t), pkg) + w.typ(t.Origin(), pkg) return } w.startType(definedType) w.qualifiedType(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: w.startType(typeParamType) w.qualifiedType(t.Obj()) @@ -868,7 +867,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.signature(sig) } - case *typeparams.Union: + case *types.Union: w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) @@ -948,14 +947,14 @@ func (w *exportWriter) signature(sig *types.Signature) { } } -func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) for i := 0; i < ts.Len(); i++ { w.typ(ts.At(i), pkg) } } -func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) for i := 0; i < list.Len(); i++ { @@ -973,7 +972,7 @@ const blankMarker = "$" // differs from its actual object name: it is prefixed with a qualifier, and // blank type parameter names are disambiguated by their index in the type // parameter list. 
-func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { +func tparamExportName(prefix string, tparam *types.TypeParam) string { assert(prefix != "") name := tparam.Obj().Name() if name == "_" { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 8e64cf644f..9bde15e3bc 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -22,7 +22,6 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" - "golang.org/x/tools/internal/typeparams" ) type intReader struct { @@ -321,7 +320,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte // Therefore, we defer calling SetConstraint there, and call it here instead // after all types are complete. for _, d := range p.later { - typeparams.SetTypeParamConstraint(d.t, d.constraint) + d.t.SetConstraint(d.constraint) } for _, typ := range p.interfaceList { @@ -339,7 +338,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } type setConstraintArgs struct { - t *typeparams.TypeParam + t *types.TypeParam constraint types.Type } @@ -549,7 +548,7 @@ func (r *importReader) obj(name string) { r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) case 'F', 'G': - var tparams []*typeparams.TypeParam + var tparams []*types.TypeParam if tag == 'G' { tparams = r.tparamList() } @@ -566,7 +565,7 @@ func (r *importReader) obj(name string) { r.declare(obj) if tag == 'U' { tparams := r.tparamList() - typeparams.SetForNamed(named, tparams) + named.SetTypeParams(tparams) } underlying := r.p.typAt(r.uint64(), named).Underlying() @@ -583,12 +582,12 @@ func (r *importReader) obj(name string) { // typeparams being used in the method sig/body). base := baseType(recv.Type()) assert(base != nil) - targs := typeparams.NamedTypeArgs(base) - var rparams []*typeparams.TypeParam + targs := base.TypeArgs() + var rparams []*types.TypeParam if targs.Len() > 0 { - rparams = make([]*typeparams.TypeParam, targs.Len()) + rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*typeparams.TypeParam) + rparams[i] = targs.At(i).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -606,7 +605,7 @@ func (r *importReader) obj(name string) { } name0 := tparamName(name) tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := typeparams.NewTypeParam(tn, nil) + t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. @@ -622,7 +621,7 @@ func (r *importReader) obj(name string) { if iface == nil { errorf("non-interface constraint marked implicit") } - typeparams.MarkImplicit(iface) + iface.MarkImplicit() } // The constraint type may not be complete, if we // are in the middle of a type recursion involving type @@ -966,7 +965,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // The imported instantiated type doesn't include any methods, so // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment - t, _ := typeparams.Instantiate(nil, baseType, targs, false) + t, _ := types.Instantiate(nil, baseType, targs, false) // Workaround for golang/go#61561. See the doc for instanceList for details. 
r.p.instanceList = append(r.p.instanceList, t) @@ -976,11 +975,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if r.p.version < iexportVersionGenerics { errorf("unexpected instantiation type") } - terms := make([]*typeparams.Term, r.uint64()) + terms := make([]*types.Term, r.uint64()) for i := range terms { - terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + terms[i] = types.NewTerm(r.bool(), r.typ()) } - return typeparams.NewUnion(terms) + return types.NewUnion(terms) } } @@ -1008,23 +1007,23 @@ func (r *importReader) objectPathObject() types.Object { return obj } -func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() variadic := params.Len() > 0 && r.bool() - return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } -func (r *importReader) tparamList() []*typeparams.TypeParam { +func (r *importReader) tparamList() []*types.TypeParam { n := r.uint64() if n == 0 { return nil } - xs := make([]*typeparams.TypeParam, n) + xs := make([]*types.TypeParam, n) for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = r.typ().(*typeparams.TypeParam) + xs[i] = r.typ().(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 53cf66da01..55312522dc 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -13,6 +13,7 @@ import ( "io" "log" "os" + "os/exec" "reflect" "regexp" "runtime" @@ -21,8 +22,6 @@ import ( "sync" "time" - exec "golang.org/x/sys/execabs" - "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/event/keys" "golang.org/x/tools/internal/event/label" @@ -85,6 +84,7 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. +// Postcondition: both error results have same nilness. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) defer done() @@ -95,23 +95,24 @@ func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) // If we encounter a load concurrency error, we need to retry serially. - if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) } - event.Error(ctx, "Load concurrency error, will retry serially", err) - // Run serially by calling runPiped. 
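The iimport.go changes above call the go/types 1.18+ constructors (NewTypeParam, NewTerm, NewUnion, SetConstraint) directly rather than the typeparams wrappers. A standalone sketch of those constructors; the package path and names are invented for the example:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.org/p", "p") // hypothetical package path
	tn := types.NewTypeName(token.NoPos, pkg, "T", nil)
	tp := types.NewTypeParam(tn, nil)

	// Constraint ~string | int, embedded in an interface as go/types expects.
	union := types.NewUnion([]*types.Term{
		types.NewTerm(true, types.Typ[types.String]),
		types.NewTerm(false, types.Typ[types.Int]),
	})
	iface := types.NewInterfaceType(nil, []types.Type{union})
	iface.Complete()
	tp.SetConstraint(iface)

	fmt.Println(tp.Obj().Name(), tp.Constraint()) // T interface{~string|int}, modulo spacing
}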
- stdout.Reset() - stderr.Reset() - friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { // Wait for 1 worker to become available. select { case <-ctx.Done(): - return nil, nil, nil, ctx.Err() + return nil, nil, ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: defer func() { <-runner.inFlight }() } @@ -121,6 +122,7 @@ func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { // Make sure the runner is always initialized. runner.initialize() @@ -129,7 +131,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde // runPiped commands. select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.serialized <- struct{}{}: defer func() { <-runner.serialized }() } @@ -139,7 +141,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde for i := 0; i < maxInFlight; i++ { select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: // Make sure we always "return" any workers we took. defer func() { <-runner.inFlight }() @@ -172,6 +174,7 @@ type Invocation struct { Logf func(format string, args ...interface{}) } +// Postcondition: both error results have same nilness. func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { rawError = i.run(ctx, stdout, stderr) if rawError != nil { diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index d9950b1f0b..44719de173 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,10 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. 
package packagesinternal -import ( - "golang.org/x/tools/internal/gocommand" -) - var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } @@ -18,10 +14,6 @@ type PackageError struct { Err string // the error itself } -var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } - -var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} - var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var ForTest int // must be set as a LoadMode to call GetForTest diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index d0d0649fe2..cdab988531 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -42,7 +42,7 @@ func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Ex switch e := n.(type) { case *ast.IndexExpr: return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *IndexListExpr: + case *ast.IndexListExpr: return e.X, e.Lbrack, e.Indices, e.Rbrack } return nil, token.NoPos, nil, token.NoPos @@ -63,7 +63,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke Rbrack: rbrack, } default: - return &IndexListExpr{ + return &ast.IndexListExpr{ X: x, Lbrack: lbrack, Indices: indices, @@ -74,7 +74,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke // IsTypeParam reports whether t is a type parameter. func IsTypeParam(t types.Type) bool { - _, ok := t.(*TypeParam) + _, ok := t.(*types.TypeParam) return ok } @@ -100,11 +100,11 @@ func OriginMethod(fn *types.Func) *types.Func { // Receiver is a *types.Interface. return fn } - if ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { // Receiver base has no type parameters, so we can avoid the lookup below. return fn } - orig := NamedTypeOrigin(named) + orig := named.Origin() gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: @@ -157,7 +157,7 @@ func OriginMethod(fn *types.Func) *types.Func { // // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { // If V and T are not both named, or do not have matching non-empty type // parameter lists, fall back on types.AssignableTo. @@ -167,9 +167,9 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { return types.AssignableTo(V, T) } - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { return types.AssignableTo(V, T) } @@ -182,7 +182,7 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { // Minor optimization: ensure we share a context across the two // instantiations below. 
if ctxt == nil { - ctxt = NewContext() + ctxt = types.NewContext() } var targs []types.Type @@ -190,12 +190,12 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { targs = append(targs, vtparams.At(i)) } - vinst, err := Instantiate(ctxt, V, targs, true) + vinst, err := types.Instantiate(ctxt, V, targs, true) if err != nil { panic("type parameters should satisfy their own constraints") } - tinst, err := Instantiate(ctxt, T, targs, true) + tinst, err := types.Instantiate(ctxt, T, targs, true) if err != nil { return false } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 993135ec90..7ea8840eab 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -81,13 +81,13 @@ func CoreType(T types.Type) types.Type { // restrictions may be arbitrarily complex. For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, @@ -108,15 +108,15 @@ func CoreType(T types.Type) types.Type { // // _NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. -func _NormalTerms(typ types.Type) ([]*Term, error) { +func _NormalTerms(typ types.Type) ([]*types.Term, error) { switch typ := typ.(type) { - case *TypeParam: + case *types.TypeParam: return StructuralTerms(typ) - case *Union: + case *types.Union: return UnionTermSet(typ) case *types.Interface: return InterfaceTermSet(typ) default: - return []*Term{NewTerm(false, typ)}, nil + return []*types.Term{types.NewTerm(false, typ)}, nil } } diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390e1..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d67148823c..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. 
-const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 9c631b6512..93c80fdc96 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -60,7 +60,7 @@ var ErrEmptyTypeSet = errors.New("empty type set") // // StructuralTerms makes no guarantees about the order of terms, except that it // is deterministic. -func StructuralTerms(tparam *TypeParam) ([]*Term, error) { +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { constraint := tparam.Constraint() if constraint == nil { return nil, fmt.Errorf("%s has nil constraint", tparam) @@ -78,7 +78,7 @@ func StructuralTerms(tparam *TypeParam) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { return computeTermSet(iface) } @@ -88,11 +88,11 @@ func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func UnionTermSet(union *Union) ([]*Term, error) { +func UnionTermSet(union *types.Union) ([]*types.Term, error) { return computeTermSet(union) } -func computeTermSet(typ types.Type) ([]*Term, error) { +func computeTermSet(typ types.Type) ([]*types.Term, error) { tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) if err != nil { return nil, err @@ -103,9 +103,9 @@ func computeTermSet(typ types.Type) ([]*Term, error) { if tset.terms.isAll() { return nil, nil } - var terms []*Term + var terms []*types.Term for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) + terms = append(terms, types.NewTerm(term.tilde, term.typ)) } return terms, nil } @@ -162,7 +162,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in tset.terms = allTermlist for i := 0; i < u.NumEmbeddeds(); i++ { embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { + if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } tset2, err := computeTermSetInternal(embedded, seen, depth+1) @@ -171,7 +171,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in } tset.terms = tset.terms.intersect(tset2.terms) } - case *Union: + case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil for i := 0; i < u.Len(); i++ { @@ -184,7 +184,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, err } terms = tset2.terms - case *TypeParam, *Union: + case *types.TypeParam, *types.Union: // A stand-alone type parameter or union is not permitted as union // term. 
return nil, fmt.Errorf("invalid union term %T", t) @@ -199,7 +199,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) } } - case *TypeParam: + case *types.TypeParam: panic("unreachable") default: // For all other types, the term set is just a single non-tilde term diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 933106a23d..cbd12f8013 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -30,7 +30,7 @@ func (xl termlist) String() string { var buf bytes.Buffer for i, x := range xl { if i > 0 { - buf.WriteString(" ∪ ") + buf.WriteString(" | ") } buf.WriteString(x.String()) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index 7ed86e1711..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. -func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. -type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. 
-func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. -func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) *types.Named { - return named -} - -// Term holds information about a structural type restriction. -type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. -func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. 
-func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index cf301af1db..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). -func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) *types.Named { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. 
-func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(ctxt, typ, targs, validate) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go index 7ddee28d98..7350bb702a 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -10,11 +10,10 @@ import "go/types" // A term describes elementary type sets: // -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t type term struct { tilde bool // valid if typ != nil typ types.Type diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 66e8b099bd..ce7d4351b2 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,8 +11,6 @@ import ( "go/types" "reflect" "unsafe" - - "golang.org/x/tools/go/types/objectpath" ) func SetUsesCgo(conf *types.Config) bool { @@ -52,17 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } var SetGoVersion = func(conf *types.Config, version string) bool { return false } - -// SkipEncoderMethodSorting marks the encoder as not requiring sorted methods, -// as an optimization for gopls (which guarantees the order of parsed source files). -// -// TODO(golang/go#61443): eliminate this parameter one way or the other. -// -//go:linkname SkipEncoderMethodSorting golang.org/x/tools/go/types/objectpath.skipMethodSorting -func SkipEncoderMethodSorting(enc *objectpath.Encoder) - -// ObjectpathObject is like objectpath.Object, but allows suppressing method -// sorting (which is not necessary for gopls). 
-// -//go:linkname ObjectpathObject golang.org/x/tools/go/types/objectpath.object -func ObjectpathObject(pkg *types.Package, p objectpath.Path, skipMethodSorting bool) (types.Object, error) diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go new file mode 100644 index 0000000000..bbabcd22e9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/gover.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a fork of internal/gover for use by x/tools until +// go1.21 and earlier are no longer supported by x/tools. + +package versions + +import "strings" + +// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]] +// The numbers are the original decimal strings to avoid integer overflows +// and since there is very little actual math. (Probably overflow doesn't matter in practice, +// but at the time this code was written, there was an existing test that used +// go1.99999999999, which does not fit in an int on 32-bit platforms. +// The "big decimal" representation avoids the problem entirely.) +type gover struct { + major string // decimal + minor string // decimal or "" + patch string // decimal or "" + kind string // "", "alpha", "beta", "rc" + pre string // decimal or "" +} + +// compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as toolchain versions. +// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". +// Malformed versions compare less than well-formed versions and equal to each other. +// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". +func compare(x, y string) int { + vx := parse(x) + vy := parse(y) + + if c := cmpInt(vx.major, vy.major); c != 0 { + return c + } + if c := cmpInt(vx.minor, vy.minor); c != 0 { + return c + } + if c := cmpInt(vx.patch, vy.patch); c != 0 { + return c + } + if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc + return c + } + if c := cmpInt(vx.pre, vy.pre); c != 0 { + return c + } + return 0 +} + +// lang returns the Go language version. For example, lang("1.2.3") == "1.2". +func lang(x string) string { + v := parse(x) + if v.minor == "" || v.major == "1" && v.minor == "0" { + return v.major + } + return v.major + "." + v.minor +} + +// isValid reports whether the version x is valid. +func isValid(x string) bool { + return parse(x) != gover{} +} + +// parse parses the Go version string x into a version. +// It returns the zero version if x is malformed. +func parse(x string) gover { + var v gover + + // Parse major version. + var ok bool + v.major, x, ok = cutInt(x) + if !ok { + return gover{} + } + if x == "" { + // Interpret "1" as "1.0.0". + v.minor = "0" + v.patch = "0" + return v + } + + // Parse . before minor version. + if x[0] != '.' { + return gover{} + } + + // Parse minor version. + v.minor, x, ok = cutInt(x[1:]) + if !ok { + return gover{} + } + if x == "" { + // Patch missing is same as "0" for older versions. + // Starting in Go 1.21, patch missing is different from explicit .0. + if cmpInt(v.minor, "21") < 0 { + v.patch = "0" + } + return v + } + + // Parse patch if present. + if x[0] == '.' { + v.patch, x, ok = cutInt(x[1:]) + if !ok || x != "" { + // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). 
+ // Allowing them would be a bit confusing because we already have: + // 1.21 < 1.21rc1 + // But a prerelease of a patch would have the opposite effect: + // 1.21.3rc1 < 1.21.3 + // We've never needed them before, so let's not start now. + return gover{} + } + return v + } + + // Parse prerelease. + i := 0 + for i < len(x) && (x[i] < '0' || '9' < x[i]) { + if x[i] < 'a' || 'z' < x[i] { + return gover{} + } + i++ + } + if i == 0 { + return gover{} + } + v.kind, x = x[:i], x[i:] + if x == "" { + return v + } + v.pre, x, ok = cutInt(x) + if !ok || x != "" { + return gover{} + } + + return v +} + +// cutInt scans the leading decimal number at the start of x to an integer +// and returns that value and the rest of the string. +func cutInt(x string) (n, rest string, ok bool) { + i := 0 + for i < len(x) && '0' <= x[i] && x[i] <= '9' { + i++ + } + if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero + return "", "", false + } + return x[:i], x[i:], true +} + +// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. +// (Copied from golang.org/x/mod/semver's compareInt.) +func cmpInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go new file mode 100644 index 0000000000..562eef21fa --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "go/types" +) + +// GoVersion returns the Go version of the type package. +// It returns zero if no version can be determined. +func GoVersion(pkg *types.Package) string { + // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. + if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { + return pkg.GoVersion() + } + return "" +} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go new file mode 100644 index 0000000000..a7b79207ae --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersions always reports the a file's Go version as the +// zero version at this Go version. +func FileVersions(info *types.Info, file *ast.File) string { return "" } + +// InitFileVersions is a noop at this Go version. +func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go new file mode 100644 index 0000000000..7b9ba89a82 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersions maps a file to the file's semantic Go version. 
+// The reported version is the zero version if a version cannot be determined. +func FileVersions(info *types.Info, file *ast.File) string { + return info.FileVersions[file] +} + +// InitFileVersions initializes info to record Go versions for Go files. +func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) +} diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go121.go b/vendor/golang.org/x/tools/internal/versions/versions_go121.go new file mode 100644 index 0000000000..cf4a7d0360 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/versions_go121.go @@ -0,0 +1,49 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package versions + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { + v := lang(stripGo(x)) + if v == "" { + return "" + } + return x[:2+len(v)] // "go"+v without allocation +} + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. +// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go122.go b/vendor/golang.org/x/tools/internal/versions/versions_go122.go new file mode 100644 index 0000000000..c1c1814b28 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/versions_go122.go @@ -0,0 +1,38 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/version" +) + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { return version.Lang(x) } + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. 
+// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return version.Compare(x, y) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return version.IsValid(x) } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index 575456c838..27d277927c 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -142,7 +142,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -404,7 +404,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. - Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index c199702fbd..9ae3af8f11 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -158,7 +158,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -419,7 +419,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. - Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 8a28614c0b..434e266fde 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -170,7 +170,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // When specified, it should match one the container or initContainer // names in the pod template. 
// +optional - ContainerName *string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` + ContainerName *string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are @@ -230,14 +230,14 @@ type PodFailurePolicyRule struct { // Represents the requirement on the container exit codes. // +optional - OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes" protobuf:"bytes,2,opt,name=onExitCodes"` + OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes,omitempty" protobuf:"bytes,2,opt,name=onExitCodes"` // Represents the requirement on the pod conditions. The requirement is represented // as a list of pod condition patterns. The requirement is satisfied if at // least one pattern matches an actual pod condition. At most 20 elements are allowed. // +listType=atomic // +optional - OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions" protobuf:"bytes,3,opt,name=onPodConditions"` + OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions,omitempty" protobuf:"bytes,3,opt,name=onPodConditions"` } // PodFailurePolicy describes how failed pods influence the backoffLimit. diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go index 38b80a304a..3da5549442 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go @@ -177,7 +177,7 @@ func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val { return types.NewErr("composited variable %q fails to compile: %v", a.name, a.result.Error) } - v, details, err := a.result.Program.Eval(a.activation) + v, details, err := a.result.Program.ContextEval(a.context, a.activation) if details == nil { return types.NewErr("unable to get evaluation details of variable %q", a.name) } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go index b2624694c8..9cd3c01aed 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go @@ -180,8 +180,9 @@ func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string, celmetrics.Metrics.ObserveDefinition(context.TODO(), "active", "deny") } - // Skip reconcile if the spec of the definition is unchanged - if info.lastReconciledValue != nil && definition != nil && + // Skip reconcile if the spec of the definition is unchanged and had a + // successful previous sync + if info.configurationError == nil && info.lastReconciledValue != nil && definition != nil && apiequality.Semantic.DeepEqual(info.lastReconciledValue.Spec, definition.Spec) { return nil } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go index 61a7fd70de..254a223481 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go @@ -229,7 +229,6 @@ func (rdm *resourceDiscoveryManager) AddGroupVersion(source Source, groupName st } func 
(rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupName string, value apidiscoveryv2beta1.APIVersionDiscovery) { - klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version) if rdm.apiGroups == nil { rdm.apiGroups = make(map[groupKey]*apidiscoveryv2beta1.APIGroupDiscovery) @@ -273,6 +272,7 @@ func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupN } rdm.apiGroups[key] = group } + klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version) gv := metav1.GroupVersion{Group: groupName, Version: value.Version} gvKey := groupVersionKey{ diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 2276e2daf9..d06447a7e5 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -166,6 +166,13 @@ const ( // Deprecates and removes SelfLink from ObjectMeta and ListMeta. RemoveSelfLink featuregate.Feature = "RemoveSelfLink" + // owner: @serathius + // beta: v1.30 + // + // Allow watch cache to create a watch on a dedicated RPC. + // This prevents watch cache from being starved by other watches. + SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC" + // owner: @apelisse, @lavalamp // alpha: v1.14 // beta: v1.16 @@ -222,6 +229,12 @@ const ( // Enables support for watch bookmark events. WatchBookmark featuregate.Feature = "WatchBookmark" + // owner: @serathius + // beta: 1.30 + // Enables watches without resourceVersion to be served from storage. + // Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue. + WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion" + // owner: @vinaykul // kep: http://kep.k8s.io/1287 // alpha: v1.27 @@ -286,6 +299,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 @@ -298,6 +313,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, + InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, WatchList: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index d678f52dfb..047736e57d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -915,7 +915,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { requestWorkEstimator := flowcontrolrequest.NewWorkEstimator( c.StorageObjectCountTracker.Get, c.FlowControl.GetInterestedWatchCount, workEstimatorCfg, c.FlowControl.GetMaxSeats) handler = filterlatency.TrackCompleted(handler) - handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator) + handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, 
requestWorkEstimator, c.RequestTimeout/4) handler = filterlatency.TrackStarted(handler, c.TracerProvider, "priorityandfairness") } else { handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc) diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index 6b39877816..05cc44263f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -35,6 +35,7 @@ import ( fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" "k8s.io/klog/v2" + utilsclock "k8s.io/utils/clock" ) // PriorityAndFairnessClassification identifies the results of @@ -78,6 +79,10 @@ type priorityAndFairnessHandler struct { // the purpose of computing RetryAfter header to avoid system // overload. droppedRequests utilflowcontrol.DroppedRequestsTracker + + // newReqWaitCtxFn creates a derived context with a deadline + // of how long a given request can wait in its queue. + newReqWaitCtxFn func(context.Context) (context.Context, context.CancelFunc) } func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Request) { @@ -240,8 +245,9 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque resultCh <- err }() - // We create handleCtx with explicit cancelation function. - // The reason for it is that Handle() underneath may start additional goroutine + // We create handleCtx with an adjusted deadline, for two reasons. + // One is to limit the time the request waits before its execution starts. + // The other reason for it is that Handle() underneath may start additional goroutine // that is blocked on context cancellation. However, from APF point of view, // we don't want to wait until the whole watch request is processed (which is // when it context is actually cancelled) - we want to unblock the goroutine as @@ -249,7 +255,7 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque // // Note that we explicitly do NOT call the actuall handler using that context // to avoid cancelling request too early. 
- handleCtx, handleCtxCancel := context.WithCancel(ctx) + handleCtx, handleCtxCancel := h.newReqWaitCtxFn(ctx) defer handleCtxCancel() // Note that Handle will return irrespective of whether the request @@ -286,7 +292,11 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque h.handler.ServeHTTP(w, r) } - h.fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute) + func() { + handleCtx, cancelFn := h.newReqWaitCtxFn(ctx) + defer cancelFn() + h.fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute) + }() } if !served { @@ -309,6 +319,7 @@ func WithPriorityAndFairness( longRunningRequestCheck apirequest.LongRunningRequestCheck, fcIfc utilflowcontrol.Interface, workEstimator flowcontrolrequest.WorkEstimatorFunc, + defaultRequestWaitLimit time.Duration, ) http.Handler { if fcIfc == nil { klog.Warningf("priority and fairness support not found, skipping") @@ -322,12 +333,18 @@ func WithPriorityAndFairness( waitingMark.mutatingObserver = fcmetrics.GetWaitingMutatingConcurrency() }) + clock := &utilsclock.RealClock{} + newReqWaitCtxFn := func(ctx context.Context) (context.Context, context.CancelFunc) { + return getRequestWaitContext(ctx, defaultRequestWaitLimit, clock) + } + priorityAndFairnessHandler := &priorityAndFairnessHandler{ handler: handler, longRunningRequestCheck: longRunningRequestCheck, fcIfc: fcIfc, workEstimator: workEstimator, droppedRequests: utilflowcontrol.NewDroppedRequestsTracker(), + newReqWaitCtxFn: newReqWaitCtxFn, } return http.HandlerFunc(priorityAndFairnessHandler.Handle) } @@ -356,3 +373,48 @@ func tooManyRequests(req *http.Request, w http.ResponseWriter, retryAfter string w.Header().Set("Retry-After", retryAfter) http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) } + +// getRequestWaitContext returns a new context with a deadline of how +// long the request is allowed to wait before it is removed from its +// queue and rejected. +// The context.CancelFunc returned must never be nil and the caller is +// responsible for calling the CancelFunc function for cleanup. +// - ctx: the context associated with the request (it may or may +// not have a deadline). +// - defaultRequestWaitLimit: the default wait duration that is used +// if the request context does not have any deadline. +// (a) initialization of a watch or +// (b) a request whose context has no deadline +// +// clock comes in handy for testing the function +func getRequestWaitContext(ctx context.Context, defaultRequestWaitLimit time.Duration, clock utilsclock.PassiveClock) (context.Context, context.CancelFunc) { + if ctx.Err() != nil { + return ctx, func() {} + } + + reqArrivedAt := clock.Now() + if reqReceivedTimestamp, ok := apirequest.ReceivedTimestampFrom(ctx); ok { + reqArrivedAt = reqReceivedTimestamp + } + + // a) we will allow the request to wait in the queue for one + // fourth of the time of its allotted deadline. 
+ // b) if the request context does not have any deadline + // then we default to 'defaultRequestWaitLimit' + // in any case, the wait limit for any request must not + // exceed the hard limit of 1m + // + // request has deadline: + // wait-limit = min(remaining deadline / 4, 1m) + // request has no deadline: + // wait-limit = min(defaultRequestWaitLimit, 1m) + thisReqWaitLimit := defaultRequestWaitLimit + if deadline, ok := ctx.Deadline(); ok { + thisReqWaitLimit = deadline.Sub(reqArrivedAt) / 4 + } + if thisReqWaitLimit > time.Minute { + thisReqWaitLimit = time.Minute + } + + return context.WithDeadline(ctx, reqArrivedAt.Add(thisReqWaitLimit)) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index 69f8fb5155..5d031e202e 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -154,7 +154,6 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { config.SharedInformerFactory, kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta3(), config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, - config.RequestTimeout/4, ) } else { klog.Warningf("Neither kubeconfig is provided nor service-account is mounted, so APIPriorityAndFairness will be disabled") diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 0796f591d7..ec5b5b267d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -26,6 +26,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "google.golang.org/grpc/metadata" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -51,7 +52,8 @@ import ( ) var ( - emptyFunc = func(bool) {} + emptyFunc = func(bool) {} + coreNamespaceResource = schema.GroupResource{Group: "", Resource: "namespaces"} ) const ( @@ -401,10 +403,18 @@ func NewCacherFromConfig(config Config) (*Cacher, error) { // so that future reuse does not get a spurious timeout. <-cacher.timer.C } - progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock) + var contextMetadata metadata.MD + if utilfeature.DefaultFeatureGate.Enabled(features.SeparateCacheWatchRPC) { + // Add grpc context metadata to watch and progress notify requests done by cacher to: + // * Prevent starvation of watch opened by cacher, by moving it to separate Watch RPC than watch request that bypass cacher. + // * Ensure that progress notification requests are executed on the same Watch RPC as their watch, which is required for it to work. 
+ contextMetadata = metadata.New(map[string]string{"source": "cache"}) + } + + progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock, contextMetadata) watchCache := newWatchCache( config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource, progressRequester) - listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc, contextMetadata) reflectorName := "storage/cacher.go:" + config.ResourcePrefix reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) @@ -517,7 +527,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) && opts.SendInitialEvents != nil { opts.SendInitialEvents = nil } - if opts.SendInitialEvents == nil && opts.ResourceVersion == "" { + if utilfeature.DefaultFeatureGate.Enabled(features.WatchFromStorageWithoutResourceVersion) && opts.SendInitialEvents == nil && opts.ResourceVersion == "" { return c.storage.Watch(ctx, key, opts) } requestedWatchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) @@ -543,6 +553,12 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions scope.name = selectorName } + // for request like '/api/v1/watch/namespaces/*', set scope.namespace to empty. + // namespaces don't populate metadata.namespace in ObjFields. + if c.groupResource == coreNamespaceResource && len(scope.namespace) > 0 && scope.namespace == scope.name { + scope.namespace = "" + } + triggerValue, triggerSupported := "", false if c.indexedTrigger != nil { for _, field := range pred.IndexFields { diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go index 1252e5e349..2817a93dd0 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go @@ -19,6 +19,8 @@ package cacher import ( "context" + "google.golang.org/grpc/metadata" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -30,17 +32,19 @@ import ( // listerWatcher opaques storage.Interface to expose cache.ListerWatcher. type listerWatcher struct { - storage storage.Interface - resourcePrefix string - newListFunc func() runtime.Object + storage storage.Interface + resourcePrefix string + newListFunc func() runtime.Object + contextMetadata metadata.MD } // NewListerWatcher returns a storage.Interface backed ListerWatcher. 
-func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { +func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object, contextMetadata metadata.MD) cache.ListerWatcher { return &listerWatcher{ - storage: storage, - resourcePrefix: resourcePrefix, - newListFunc: newListFunc, + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + contextMetadata: contextMetadata, } } @@ -59,7 +63,11 @@ func (lw *listerWatcher) List(options metav1.ListOptions) (runtime.Object, error Predicate: pred, Recursive: true, } - if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil { + ctx := context.Background() + if lw.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata) + } + if err := lw.storage.GetList(ctx, lw.resourcePrefix, storageOpts, list); err != nil { return nil, err } return list, nil @@ -73,5 +81,9 @@ func (lw *listerWatcher) Watch(options metav1.ListOptions) (watch.Interface, err Recursive: true, ProgressNotify: true, } - return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts) + ctx := context.Background() + if lw.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata) + } + return lw.storage.Watch(ctx, lw.resourcePrefix, opts) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index c26eb55dac..c27ca053b7 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -492,8 +492,7 @@ func (s sortableStoreElements) Swap(i, j int) { // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along // with their ResourceVersion and the name of the index, if any, that was used. -func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) { - var err error +func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) { if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && w.notFresh(resourceVersion) { w.waitingUntilFresh.Add() err = w.waitUntilFreshAndBlock(ctx, resourceVersion) @@ -501,12 +500,14 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion } else { err = w.waitUntilFreshAndBlock(ctx, resourceVersion) } + + defer func() { sort.Sort(sortableStoreElements(result)) }() defer w.RUnlock() if err != nil { - return nil, 0, "", err + return result, rv, index, err } - result, rv, index, err := func() ([]interface{}, uint64, string, error) { + result, rv, index, err = func() ([]interface{}, uint64, string, error) { // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we // want - they will be filtered out later. The fact that we return less things is only further performance improvement. 
@@ -519,7 +520,6 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion return w.store.List(), w.resourceVersion, "", nil }() - sort.Sort(sortableStoreElements(result)) return result, rv, index, err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go index f44ca9325b..13f50bc187 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go @@ -21,6 +21,8 @@ import ( "sync" "time" + "google.golang.org/grpc/metadata" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -34,10 +36,11 @@ const ( progressRequestPeriod = 100 * time.Millisecond ) -func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory) *conditionalProgressRequester { +func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory, contextMetadata metadata.MD) *conditionalProgressRequester { pr := &conditionalProgressRequester{ clock: clock, requestWatchProgress: requestWatchProgress, + contextMetadata: contextMetadata, } pr.cond = sync.NewCond(pr.mux.RLocker()) return pr @@ -54,6 +57,7 @@ type TickerFactory interface { type conditionalProgressRequester struct { clock TickerFactory requestWatchProgress WatchProgressRequester + contextMetadata metadata.MD mux sync.RWMutex cond *sync.Cond @@ -63,6 +67,9 @@ type conditionalProgressRequester struct { func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) { ctx := wait.ContextForChannel(stopCh) + if pr.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, pr.contextMetadata) + } go func() { defer utilruntime.HandleCrash() <-stopCh diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index ac023d55d8..e3147323d7 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -84,7 +84,7 @@ var ( }, []string{"endpoint"}, ) - storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"cluster"}, nil, compbasemetrics.ALPHA, "") + storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"storage_cluster_id"}, nil, compbasemetrics.ALPHA, "") storageMonitor = &monitorCollector{monitorGetter: func() ([]Monitor, error) { return nil, nil }} etcdEventsReceivedCounts = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ @@ -274,21 +274,21 @@ func (c *monitorCollector) CollectWithStability(ch chan<- compbasemetrics.Metric } for i, m := range monitors { - cluster := fmt.Sprintf("etcd-%d", i) + storageClusterID := fmt.Sprintf("etcd-%d", i) - klog.V(4).InfoS("Start collecting storage metrics", "cluster", cluster) + klog.V(4).InfoS("Start collecting storage metrics", "storage_cluster_id", storageClusterID) ctx, cancel := context.WithTimeout(context.Background(), time.Second) metrics, err := m.Monitor(ctx) cancel() m.Close() if err != nil { - klog.InfoS("Failed to get storage metrics", "cluster", cluster, "err", err) + klog.InfoS("Failed to get storage metrics", "storage_cluster_id", storageClusterID, "err", err) continue } - metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, 
compbasemetrics.GaugeValue, float64(metrics.Size), cluster) + metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), storageClusterID) if err != nil { - klog.ErrorS(err, "Failed to create metric", "cluster", cluster) + klog.ErrorS(err, "Failed to create metric", "storage_cluster_id", storageClusterID) } ch <- metric } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index 708bf2cdef..8c90811bf4 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -150,9 +150,6 @@ type configController struct { // from server configuration. serverConcurrencyLimit int - // requestWaitLimit comes from server configuration. - requestWaitLimit time.Duration - // watchTracker implements the necessary WatchTracker interface. WatchTracker @@ -263,9 +260,15 @@ type seatDemandStats struct { } func (stats *seatDemandStats) update(obs fq.IntegratorResults) { + stats.highWatermark = obs.Max + if obs.Duration <= 0 { + return + } + if math.IsNaN(obs.Deviation) { + obs.Deviation = 0 + } stats.avg = obs.Average stats.stdDev = obs.Deviation - stats.highWatermark = obs.Max envelope := obs.Average + obs.Deviation stats.smoothed = math.Max(envelope, seatDemandSmoothingCoefficient*stats.smoothed+(1-seatDemandSmoothingCoefficient)*envelope) } @@ -281,13 +284,12 @@ func newTestableController(config TestableConfig) *configController { asFieldManager: config.AsFieldManager, foundToDangling: config.FoundToDangling, serverConcurrencyLimit: config.ServerConcurrencyLimit, - requestWaitLimit: config.RequestWaitLimit, flowcontrolClient: config.FlowcontrolClient, priorityLevelStates: make(map[string]*priorityLevelState), WatchTracker: NewWatchTracker(), MaxSeatsTracker: NewMaxSeatsTracker(), } - klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, requestWaitLimit=%s, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.requestWaitLimit, cfgCtlr.name, cfgCtlr.asFieldManager) + klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager) // Start with longish delay because conflicts will be between // different processes, so take some time to go away. cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") @@ -427,7 +429,7 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta plState := plStates[plName] if setCompleters { qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues, - plState.pl, cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs, + plState.pl, plState.reqsGaugePair, plState.execSeatsObs, metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge)) if err != nil { klog.ErrorS(err, "Inconceivable! 
Configuration error in existing priority level", "pl", plState.pl) @@ -651,10 +653,10 @@ func (cfgCtlr *configController) lockAndDigestConfigObjects(newPLs []*flowcontro // Supply missing mandatory PriorityLevelConfiguration objects if !meal.haveExemptPL { - meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt, cfgCtlr.requestWaitLimit) + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt) } if !meal.haveCatchAllPL { - meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll, cfgCtlr.requestWaitLimit) + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll) } meal.finishQueueSetReconfigsLocked() @@ -686,7 +688,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi } } qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, - pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs, + pl, state.reqsGaugePair, state.execSeatsObs, metrics.NewUnionGauge(state.seatDemandIntegrator, state.seatDemandRatioedGauge)) if err != nil { klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err) @@ -792,7 +794,7 @@ func (meal *cfgMeal) processOldPLsLocked() { } var err error plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, - plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs, + plState.pl, plState.reqsGaugePair, plState.execSeatsObs, metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge)) if err != nil { // This can not happen because queueSetCompleterForPL already approved this config @@ -874,7 +876,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() { // queueSetCompleterForPL returns an appropriate QueueSetCompleter for the // given priority level configuration. Returns nil and an error if the given // object is malformed in a way that is a problem for this package. -func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) { +func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) { if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementLimited) != (pl.Spec.Limited != nil) { return nil, errors.New("broken union structure at the top, for Limited") } @@ -896,7 +898,6 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow DesiredNumQueues: int(qcAPI.Queues), QueueLengthLimit: int(qcAPI.QueueLengthLimit), HandSize: int(qcAPI.HandSize), - RequestWaitLimit: requestWaitLimit, } } } else { @@ -950,16 +951,15 @@ func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangl // imaginePL adds a priority level based on one of the mandatory ones // that does not actually exist (right now) as a real API object. 
-func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) { +func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration) { klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name) labelValues := []string{proto.Name} reqsGaugePair := metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues) execSeatsObs := meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues) seatDemandIntegrator := fq.NewNamedIntegrator(meal.cfgCtlr.clock, proto.Name) seatDemandRatioedGauge := metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{proto.Name}) - qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, - requestWaitLimit, reqsGaugePair, execSeatsObs, - metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge)) + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, reqsGaugePair, + execSeatsObs, metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge)) if err != nil { // This can not happen because proto is one of the mandatory // objects and these are not erroneous diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go index 76782623a8..05f4f5e539 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -90,7 +90,6 @@ func New( informerFactory kubeinformers.SharedInformerFactory, flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface, serverConcurrencyLimit int, - requestWaitLimit time.Duration, ) Interface { clk := eventclock.Real{} return NewTestable(TestableConfig{ @@ -101,7 +100,6 @@ func New( InformerFactory: informerFactory, FlowcontrolClient: flowcontrolClient, ServerConcurrencyLimit: serverConcurrencyLimit, - RequestWaitLimit: requestWaitLimit, ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: fqs.NewQueueSetFactory(clk), @@ -139,9 +137,6 @@ type TestableConfig struct { // ServerConcurrencyLimit for the controller to enforce ServerConcurrencyLimit int - // RequestWaitLimit configured on the server - RequestWaitLimit time.Duration - // GaugeVec for metrics about requests, broken down by phase and priority_level ReqsGaugeVec metrics.RatioedGaugeVec diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go index 013fd41e08..3b0ad16387 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go @@ -18,7 +18,6 @@ package fairqueuing import ( "context" - "time" "k8s.io/apiserver/pkg/util/flowcontrol/debug" "k8s.io/apiserver/pkg/util/flowcontrol/metrics" @@ -117,7 +116,7 @@ type QueuingConfig struct { // DesiredNumQueues is the number of queues that the API says // should exist now. This may be non-positive, in which case - // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored. + // QueueLengthLimit, and HandSize are ignored. // A value of zero means to respect the ConcurrencyLimit of the DispatchingConfig. // A negative value means to always dispatch immediately upon arrival // (i.e., the requests are "exempt" from limitation). 
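Editor's note on the hunks above and below: QueuingConfig.RequestWaitLimit is removed because queue-wait enforcement moves out of the queue set and into the priority-and-fairness filter, which now derives a per-request wait deadline (see getRequestWaitContext earlier in this patch). The following standalone Go sketch restates that rule with a hypothetical waitLimit helper; it is an illustration of the documented behavior, not code from the patch.

package main

import (
	"context"
	"fmt"
	"time"
)

// waitLimit mirrors the rule documented in getRequestWaitContext above:
// a quarter of the request's remaining deadline when one exists, otherwise
// the server default, and never more than one minute.
func waitLimit(ctx context.Context, arrivedAt time.Time, defaultLimit time.Duration) time.Duration {
	limit := defaultLimit
	if deadline, ok := ctx.Deadline(); ok {
		limit = deadline.Sub(arrivedAt) / 4
	}
	if limit > time.Minute {
		limit = time.Minute
	}
	return limit
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	// With a 60s request deadline the queue wait is capped at roughly 15s.
	fmt.Println(waitLimit(ctx, time.Now(), 15*time.Second))
}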
@@ -129,10 +128,6 @@ type QueuingConfig struct { // HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly // dealing a "hand" of this many queues and then picking one of minimum length. HandSize int - - // RequestWaitLimit is the maximum amount of time that a request may wait in a queue. - // If, by the end of that time, the request has not been dispatched then it is rejected. - RequestWaitLimit time.Duration } // DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet. diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go index aa54a9ccf1..604e0862ab 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -272,7 +272,6 @@ func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig, } else { qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit qCfg.HandSize = qs.qCfg.HandSize - qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit } qs.qCfg = qCfg @@ -300,9 +299,6 @@ const ( // Serve this one decisionExecute requestDecision = iota - // Reject this one due to APF queuing considerations - decisionReject - // This one's context timed out / was canceled decisionCancel ) @@ -337,11 +333,10 @@ func (qs *queueSet) StartRequest(ctx context.Context, workEstimate *fqrequest.Wo // ======================================================================== // Step 1: // 1) Start with shuffle sharding, to pick a queue. - // 2) Reject old requests that have been waiting too long - // 3) Reject current request if there is not enough concurrency shares and + // 2) Reject current request if there is not enough concurrency shares and // we are at max queue length - // 4) If not rejected, create a request and enqueue - req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) + // 3) If not rejected, create a request and enqueue + req = qs.shuffleShardAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) // req == nil means that the request was rejected - no remaining // concurrency shares and at max queue length already if req == nil { @@ -422,13 +417,7 @@ func (req *request) wait() (bool, bool) { } req.waitStarted = true switch decisionAny { - case decisionReject: - klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2) - qs.totRequestsRejected++ - qs.totRequestsTimedout++ - metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") - return false, qs.isIdleLocked() - case decisionCancel: + case decisionCancel: // handle in code following this switch case decisionExecute: klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) return true, false @@ -438,7 +427,7 @@ func (req *request) wait() (bool, bool) { } // TODO(aaron-prindle) add metrics for this case klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) - // remove the request from the queue as it has timed out + // remove the request from the queue as its queue wait time has exceeded queue := req.queue if req.removeFromQueueLocked() != nil { defer qs.boundNextDispatchLocked(queue) @@ -446,8 +435,9 @@ func (req *request) wait() 
(bool, bool) { qs.totSeatsWaiting -= req.MaxSeats() qs.totRequestsRejected++ qs.totRequestsCancelled++ - metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled") + metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) + metrics.AddSeatsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -req.MaxSeats()) req.NoteQueued(false) qs.reqsGaugePair.RequestsWaiting.Add(-1) qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) @@ -555,25 +545,19 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 { return math.Min(float64(seatsRequested), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues) } -// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required +// shuffleShardAndRejectOrEnqueueLocked encapsulates the logic required // to validate and enqueue a request for the queueSet/QueueSet: // 1) Start with shuffle sharding, to pick a queue. -// 2) Reject old requests that have been waiting too long -// 3) Reject current request if there is not enough concurrency shares and +// 2) Reject current request if there is not enough concurrency shares and // we are at max queue length -// 4) If not rejected, create a request and enqueue +// 3) If not rejected, create a request and enqueue // returns the enqueud request on a successful enqueue // returns nil in the case that there is no available concurrency or // the queuelengthlimit has been reached -func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { +func (qs *queueSet) shuffleShardAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { // Start with the shuffle sharding, to pick a queue. queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2) queue := qs.queues[queueIdx] - // The next step is the logic to reject requests that have been waiting too long - qs.removeTimedOutRequestsFromQueueToBoundLocked(queue, fsName) - // NOTE: currently timeout is only checked for each new request. This means that there can be - // requests that are in the queue longer than the timeout if there are no new requests - // We prefer the simplicity over the promptness, at least for now. 
defer qs.boundNextDispatchLocked(queue) @@ -632,43 +616,6 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac return bestQueueIdx } -// removeTimedOutRequestsFromQueueToBoundLocked rejects old requests that have been enqueued -// past the requestWaitLimit -func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, fsName string) { - timeoutCount := 0 - disqueueSeats := 0 - now := qs.clock.Now() - reqs := queue.requestsWaiting - // reqs are sorted oldest -> newest - // can short circuit loop (break) if oldest requests are not timing out - // as newer requests also will not have timed out - - // now - requestWaitLimit = arrivalLimit - arrivalLimit := now.Add(-qs.qCfg.RequestWaitLimit) - reqs.Walk(func(req *request) bool { - if arrivalLimit.After(req.arrivalTime) { - if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil { - timeoutCount++ - disqueueSeats += req.MaxSeats() - req.NoteQueued(false) - metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) - } - // we need to check if the next request has timed out. - return true - } - // since reqs are sorted oldest -> newest, we are done here. - return false - }) - - // remove timed out requests from queue - if timeoutCount > 0 { - qs.totRequestsWaiting -= timeoutCount - qs.totSeatsWaiting -= disqueueSeats - qs.reqsGaugePair.RequestsWaiting.Add(float64(-timeoutCount)) - qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) - } -} - // rejectOrEnqueueToBoundLocked rejects or enqueues the newly arrived // request, which has been assigned to a queue. If up against the // queue length limit and the concurrency limit then returns false. @@ -702,6 +649,7 @@ func (qs *queueSet) enqueueToBoundLocked(request *request) { qs.totRequestsWaiting++ qs.totSeatsWaiting += request.MaxSeats() metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1) + metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, request.MaxSeats()) request.NoteQueued(true) qs.reqsGaugePair.RequestsWaiting.Add(1) qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) @@ -760,6 +708,7 @@ func (qs *queueSet) dispatchLocked() bool { qs.totRequestsWaiting-- qs.totSeatsWaiting -= request.MaxSeats() metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1) + metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -request.MaxSeats()) request.NoteQueued(false) qs.reqsGaugePair.RequestsWaiting.Add(-1) defer qs.boundNextDispatchLocked(queue) diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go index 54af4415cd..9fe7b15a05 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go @@ -210,6 +210,16 @@ var ( }, []string{priorityLevel, flowSchema}, ) + apiserverCurrentInqueueSeats = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "current_inqueue_seats", + Help: "Number of seats currently pending in queues of the API Priority and Fairness subsystem", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) apiserverRequestQueueLength = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Namespace: namespace, @@ -455,6 +465,7 @@ var ( apiserverNextSBounds, apiserverNextDiscountedSBounds, apiserverCurrentInqueueRequests, + 
apiserverCurrentInqueueSeats, apiserverRequestQueueLength, apiserverRequestConcurrencyLimit, apiserverRequestConcurrencyInUse, @@ -518,6 +529,11 @@ func AddRequestsInQueues(ctx context.Context, priorityLevel, flowSchema string, apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) } +// AddSeatsInQueues adds the given delta to the gauge of the # of seats in the queues of the specified flowSchema and priorityLevel +func AddSeatsInQueues(ctx context.Context, priorityLevel, flowSchema string, delta int) { + apiserverCurrentInqueueSeats.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) +} + // AddRequestsExecuting adds the given delta to the gauge of executing requests of the given flowSchema and priorityLevel func AddRequestsExecuting(ctx context.Context, priorityLevel, flowSchema string, delta int) { apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) diff --git a/vendor/k8s.io/cloud-provider/cloud.go b/vendor/k8s.io/cloud-provider/cloud.go index c9a04085f4..482656b370 100644 --- a/vendor/k8s.io/cloud-provider/cloud.go +++ b/vendor/k8s.io/cloud-provider/cloud.go @@ -98,6 +98,8 @@ func DefaultLoadBalancerName(service *v1.Service) string { } // GetInstanceProviderID builds a ProviderID for a node in a cloud. +// Note that if the instance does not exist, we must return ("", cloudprovider.InstanceNotFound) +// cloudprovider.InstanceNotFound should NOT be returned for instances that exist but are stopped/sleeping func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) { instances, ok := cloud.Instances() if !ok { @@ -108,8 +110,11 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types. if err == NotImplemented { return "", err } + if err == InstanceNotFound { + return "", err + } - return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err) + return "", fmt.Errorf("failed to get instance ID from cloud provider: %w", err) } return cloud.ProviderName() + "://" + instanceID, nil } diff --git a/vendor/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go b/vendor/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go index aa6601facd..b8a50e42cd 100644 --- a/vendor/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go +++ b/vendor/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go @@ -152,7 +152,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) { // At this point the node has NotReady status, we need to check if the node has been removed // from the cloud provider. If node cannot be found in cloudprovider, then delete the node - exists, err := ensureNodeExistsByProviderID(ctx, c.cloud, node) + exists, err := c.ensureNodeExistsByProviderID(ctx, node) if err != nil { klog.Errorf("error checking if node %s exists: %v", node.Name, err) continue @@ -180,7 +180,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) { // Node exists. We need to check this to get taint working in similar in all cloudproviders // current problem is that shutdown nodes are not working in similar way ie. 
all cloudproviders // does not delete node from kubernetes cluster when instance it is shutdown see issue #46442 - shutdown, err := shutdownInCloudProvider(ctx, c.cloud, node) + shutdown, err := c.shutdownInCloudProvider(ctx, node) if err != nil { klog.Errorf("error checking if node %s is shutdown: %v", node.Name, err) } @@ -196,18 +196,49 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) { } } +// getProviderID returns the provider ID for the node. If Node CR has no provider ID, +// it will be the one from the cloud provider. +func (c *CloudNodeLifecycleController) getProviderID(ctx context.Context, node *v1.Node) (string, error) { + if node.Spec.ProviderID != "" { + return node.Spec.ProviderID, nil + } + + if instanceV2, ok := c.cloud.InstancesV2(); ok { + metadata, err := instanceV2.InstanceMetadata(ctx, node) + if err != nil { + return "", err + } + return metadata.ProviderID, nil + } + + providerID, err := cloudprovider.GetInstanceProviderID(ctx, c.cloud, types.NodeName(node.Name)) + if err != nil { + return "", err + } + + return providerID, nil +} + // shutdownInCloudProvider returns true if the node is shutdown on the cloud provider -func shutdownInCloudProvider(ctx context.Context, cloud cloudprovider.Interface, node *v1.Node) (bool, error) { - if instanceV2, ok := cloud.InstancesV2(); ok { +func (c *CloudNodeLifecycleController) shutdownInCloudProvider(ctx context.Context, node *v1.Node) (bool, error) { + if instanceV2, ok := c.cloud.InstancesV2(); ok { return instanceV2.InstanceShutdown(ctx, node) } - instances, ok := cloud.Instances() + instances, ok := c.cloud.Instances() if !ok { return false, errors.New("cloud provider does not support instances") } - shutdown, err := instances.InstanceShutdownByProviderID(ctx, node.Spec.ProviderID) + providerID, err := c.getProviderID(ctx, node) + if err != nil { + if err == cloudprovider.InstanceNotFound { + return false, nil + } + return false, err + } + + shutdown, err := instances.InstanceShutdownByProviderID(ctx, providerID) if err == cloudprovider.NotImplemented { return false, nil } @@ -216,32 +247,22 @@ func shutdownInCloudProvider(ctx context.Context, cloud cloudprovider.Interface, } // ensureNodeExistsByProviderID checks if the instance exists by the provider id, -// If provider id in spec is empty it calls instanceId with node name to get provider id -func ensureNodeExistsByProviderID(ctx context.Context, cloud cloudprovider.Interface, node *v1.Node) (bool, error) { - if instanceV2, ok := cloud.InstancesV2(); ok { +func (c *CloudNodeLifecycleController) ensureNodeExistsByProviderID(ctx context.Context, node *v1.Node) (bool, error) { + if instanceV2, ok := c.cloud.InstancesV2(); ok { return instanceV2.InstanceExists(ctx, node) } - instances, ok := cloud.Instances() + instances, ok := c.cloud.Instances() if !ok { return false, errors.New("instances interface not supported in the cloud provider") } - providerID := node.Spec.ProviderID - if providerID == "" { - var err error - providerID, err = instances.InstanceID(ctx, types.NodeName(node.Name)) - if err != nil { - if err == cloudprovider.InstanceNotFound { - return false, nil - } - return false, err - } - - if providerID == "" { - klog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name) + providerID, err := c.getProviderID(ctx, node) + if err != nil { + if err == cloudprovider.InstanceNotFound { return false, nil } + return false, err } return instances.InstanceExistsByProviderID(ctx, providerID) diff --git 
a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go index 7907dfad12..fbfb3f5ea1 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go @@ -57,6 +57,7 @@ var ( func Register(registry k8smetrics.KubeRegistry) { registry.Register(healthcheck) registry.Register(healthchecksTotal) + _ = k8smetrics.RegisterProcessStartTime(registry.Register) } func ResetHealthMetrics() { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index cd9cbbb8b7..a15a3ce975 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -4940,6 +4940,46 @@ func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerS return allErrs } +// ValidateInitContainerStateTransition test to if any illegal init container state transitions are being attempted +func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, podSpec *core.PodSpec) field.ErrorList { + allErrs := field.ErrorList{} + // If we should always restart, containers are allowed to leave the terminated state + if podSpec.RestartPolicy == core.RestartPolicyAlways { + return allErrs + } + for i, oldStatus := range oldStatuses { + // Skip any container that is not terminated + if oldStatus.State.Terminated == nil { + continue + } + // Skip any container that failed but is allowed to restart + if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == core.RestartPolicyOnFailure { + continue + } + + // Skip any restartable init container that is allowed to restart + isRestartableInitContainer := false + for _, c := range podSpec.InitContainers { + if oldStatus.Name == c.Name { + if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways { + isRestartableInitContainer = true + } + break + } + } + if isRestartableInitContainer { + continue + } + + for _, newStatus := range newStatuses { + if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { + allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state")) + } + } + } + return allErrs +} + // ValidatePodStatusUpdate checks for changes to status that shouldn't occur in normal operation. func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList { fldPath := field.NewPath("metadata") @@ -4961,7 +5001,7 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions // If pod should not restart, make sure the status update does not transition // any terminated containers to a non-terminated state. allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...) - allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...) + allErrs = append(allErrs, ValidateInitContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), &oldPod.Spec)...) 
// The kubelet will never restart ephemeral containers, so treat them like they have an implicit RestartPolicyNever. allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), core.RestartPolicyNever)...) allErrs = append(allErrs, validatePodResourceClaimStatuses(newPod.Status.ResourceClaimStatuses, newPod.Spec.ResourceClaims, fldPath.Child("resourceClaimStatuses"))...) diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 56791b47c8..3cca890efd 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -250,7 +250,6 @@ const ( // owner: @harche // kep: http://kep.k8s.io/3386 // alpha: v1.25 - // beta: v1.27 // // Allows using event-driven PLEG (pod lifecycle event generator) through kubelet // which avoids frequent relisting of containers which helps optimize performance. @@ -1005,7 +1004,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha}, - EventedPLEG: {Default: false, PreRelease: featuregate.Beta}, // off by default, requires CRI Runtime support + EventedPLEG: {Default: false, PreRelease: featuregate.Alpha}, ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update @@ -1137,7 +1136,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS ElasticIndexedJob: {Default: true, PreRelease: featuregate.Beta}, - SchedulerQueueingHints: {Default: true, PreRelease: featuregate.Beta}, + SchedulerQueueingHints: {Default: false, PreRelease: featuregate.Beta}, SeccompDefault: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 @@ -1206,12 +1205,16 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + genericfeatures.SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: false, PreRelease: featuregate.Beta}, + genericfeatures.WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, + // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go index 6fc1564e38..6164e3697f 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go @@ -49,7 +49,8 @@ type NodeScore struct { Score int64 } -// NodeToStatusMap declares map from node name to its status. +// NodeToStatusMap contains the statuses of the Nodes where the incoming Pod was not schedulable. +// A PostFilter plugin that uses this map should interpret absent Nodes as UnschedulableAndUnresolvable. 
type NodeToStatusMap map[string]*Status // NodePluginScores is a struct with node name and scores for that node. @@ -394,6 +395,8 @@ type FilterPlugin interface { type PostFilterPlugin interface { Plugin // PostFilter is called by the scheduling framework. + // If there is no entry in the NodeToStatus map, its implicit status is UnschedulableAndUnresolvable. + // // A PostFilter plugin should return one of the following statuses: // - Unschedulable: the plugin gets executed successfully but the pod cannot be made schedulable. // - Success: the plugin gets executed successfully and the pod can be made schedulable. diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go index aa68a606f3..23d6b25675 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go @@ -286,8 +286,14 @@ type WeightedAffinityTerm struct { Weight int32 } +// ExtenderName is a fake plugin name put in UnschedulablePlugins when Extender rejected some Nodes. +const ExtenderName = "Extender" + // Diagnosis records the details to diagnose a scheduling failure. type Diagnosis struct { + // NodeToStatusMap records the status of each retriable node (status Unschedulable) + // While this map may contain UnschedulableAndUnresolvable statuses, the absence of + // a node should be interpreted as UnschedulableAndUnresolvable. NodeToStatusMap NodeToStatusMap UnschedulablePlugins sets.Set[string] // PreFilterMsg records the messages returned from PreFilter plugins. diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go b/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go index af309353ba..238e919b33 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go @@ -102,6 +102,23 @@ func IsFailedPreconditionError(err error) bool { return errors.As(err, &failedPreconditionError) } +type OperationNotSupported struct { + msg string +} + +func (err *OperationNotSupported) Error() string { + return err.msg +} + +func NewOperationNotSupportedError(msg string) *OperationNotSupported { + return &OperationNotSupported{msg: msg} +} + +func IsOperationNotSupportedError(err error) bool { + var operationNotSupportedError *OperationNotSupported + return errors.As(err, &operationNotSupportedError) +} + // TransientOperationFailure indicates operation failed with a transient error // and may fix itself when retried. 
type TransientOperationFailure struct { diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml index f69371b933..019d7cfc7b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml @@ -1,5 +1,5 @@ # This DaemonSet was originally referenced from -# https://github.com/GoogleCloudPlatform/container-engine-accelerators/blob/master/daemonset.yaml +# https://github.com/GoogleCloudPlatform/container-engine-accelerators/blob/master/nvidia-driver-installer/cos/daemonset-preloaded.yaml # The Dockerfile and other source for this daemonset are in # https://github.com/GoogleCloudPlatform/cos-gpu-installer @@ -47,17 +47,43 @@ spec: - name: root-mount hostPath: path: / + - name: cos-tools + hostPath: + path: /var/lib/cos-tools + - name: nvidia-config + hostPath: + path: /etc/nvidia initContainers: + - image: "ubuntu@sha256:3f85b7caad41a95462cf5b787d8a04604c8262cdcdf9a472b8c52ef83375fe15" + name: bind-mount-install-dir + securityContext: + privileged: true + command: + - nsenter + - -at + - '1' + - -- + - sh + - -c + - | + if mountpoint -q /var/lib/nvidia; then + echo "The mountpoint /var/lib/nvidia exists." + else + echo "The mountpoint /var/lib/nvidia does not exist. Creating directories /home/kubernetes/bin/nvidia and /var/lib/nvidia and bind mount." + mkdir -p /var/lib/nvidia /home/kubernetes/bin/nvidia + mount --bind /home/kubernetes/bin/nvidia /var/lib/nvidia + echo "Done creating bind mounts" + fi # The COS GPU installer image version may be dependent on the version of COS being used. # Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/ # and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine version COS GPU installer for a given version of COS. 
- # Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.27 - suitable for COS M97 as per https://cloud.google.com/container-optimized-os/docs/release-notes - - image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.27 + # Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.1.10 - suitable for COS M109 as per https://cloud.google.com/container-optimized-os/docs/release-notes + - image: "gcr.io/cos-cloud/cos-gpu-installer:v2.1.10" name: nvidia-driver-installer resources: requests: - cpu: 0.15 + cpu: 150m securityContext: privileged: true env: @@ -71,6 +97,10 @@ spec: value: /etc/vulkan/icd.d - name: ROOT_MOUNT_DIR value: /root + - name: COS_TOOLS_DIR_HOST + value: /var/lib/cos-tools + - name: COS_TOOLS_DIR_CONTAINER + value: /build/cos-tools volumeMounts: - name: nvidia-install-dir-host mountPath: /usr/local/nvidia @@ -80,6 +110,37 @@ spec: mountPath: /dev - name: root-mount mountPath: /root + - name: cos-tools + mountPath: /build/cos-tools + command: + - bash + - -c + - | + echo "Checking for existing GPU driver modules" + if lsmod | grep nvidia; then + echo "GPU driver is already installed, the installed version may or may not be the driver version being tried to install, skipping installation" + exit 0 + else + echo "No GPU driver module detected, installing now" + /cos-gpu-installer install + fi + - image: "gcr.io/gke-release/nvidia-partition-gpu@sha256:e226275da6c45816959fe43cde907ee9a85c6a2aa8a429418a4cadef8ecdb86a" + name: partition-gpus + env: + - name: LD_LIBRARY_PATH + value: /usr/local/nvidia/lib64 + resources: + requests: + cpu: 150m + securityContext: + privileged: true + volumeMounts: + - name: nvidia-install-dir-host + mountPath: /usr/local/nvidia + - name: dev + mountPath: /dev + - name: nvidia-config + mountPath: /etc/nvidia containers: - image: "registry.k8s.io/pause:3.9" name: pause diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml index e47e6fed26..3d33dfc066 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v3.4.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v4.5.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml index 8f806887f4..bdd93b894d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.4.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from 
https://github.com/kubernetes-csi/external-health-monitor/raw/v0.11.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml index ba65d9e7f5..d80b5d793b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v3.1.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v4.0.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external @@ -61,6 +61,13 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch"] + # (Alpha) Access to referencegrants is only needed when the CSI driver + # has the CrossNamespaceVolumeDataSource controller capability. + # In that case, external-provisioner requires "get", "list", "watch" + # permissions for "referencegrants" on "gateway.networking.k8s.io". + #- apiGroups: ["gateway.networking.k8s.io"] + # resources: ["referencegrants"] + # verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding @@ -89,9 +96,6 @@ metadata: rules: # Only one of the following rules for endpoints or leases is required based on # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases. 
-- apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml index 410d14ff90..4f8f7980d4 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.4.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.10.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external @@ -46,6 +46,10 @@ rules: - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] + # only required if enabling the alpha volume modify feature + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattributesclasses"] + verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding @@ -63,7 +67,7 @@ roleRef: apiGroup: rbac.authorization.k8s.io --- -# Resizer must be able to work with end point in current namespace +# Resizer must be able to work with `leases` in current namespace # if (and only if) leadership election is enabled kind: Role apiVersion: rbac.authorization.k8s.io/v1 diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml index 335538d44b..7b5f5ad34d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v5.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v7.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # Together with the RBAC file for external-provisioner, this YAML file @@ -12,6 +12,7 @@ # - optionally rename the non-namespaced ClusterRole if there # are conflicts with other deployments +--- apiVersion: v1 kind: ServiceAccount metadata: @@ -37,13 +38,24 @@ rules: - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch", "create"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + verbs: ["get", "list", "watch", "update", "patch", "create"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] 
verbs: ["update", "patch"] - + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents/status"] + verbs: ["update", "patch"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md index 399316db2b..ce3a7694e1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md @@ -1,4 +1,4 @@ The files in this directory are exact copies of "kubernetes-latest" in -https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/ +https://github.com/kubernetes-csi/csi-driver-host-path/tree/release-1.13/deploy/ Do not edit manually. Run ./update-hostpath.sh to refresh the content. diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml index c8cf666a47..0250a52c1a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml @@ -15,3 +15,6 @@ spec: # To determine at runtime which mode a volume uses, pod info and its # "csi.storage.k8s.io/ephemeral" entry are needed. podInfoOnMount: true + # Kubernetes may use fsGroup to change permissions and ownership + # of the volume to match user requested fsGroup in the pod's SecurityPolicy + fsGroupPolicy: File diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml index 6a41a02391..eb4c163484 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml @@ -1,4 +1,4 @@ -# All of the individual sidecar RBAC roles get bound + # All of the individual sidecar RBAC roles get bound # to this account. 
kind: ServiceAccount apiVersion: v1 @@ -190,6 +190,7 @@ kind: StatefulSet apiVersion: apps/v1 metadata: name: csi-hostpathplugin + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -218,7 +219,7 @@ spec: serviceAccountName: csi-hostpathplugin-sa containers: - name: hostpath - image: registry.k8s.io/sig-storage/hostpathplugin:v1.11.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - "--drivername=hostpath.csi.k8s.io" - "--v=5" @@ -261,7 +262,7 @@ spec: name: dev-dir - name: csi-external-health-monitor-controller - image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0 + image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.11.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" @@ -275,7 +276,7 @@ spec: mountPath: /csi - name: node-driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -303,13 +304,13 @@ spec: volumeMounts: - mountPath: /csi name: socket-dir - image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0 + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 args: - --csi-address=/csi/csi.sock - --health-port=9898 - name: csi-attacher - image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0 + image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -323,11 +324,12 @@ spec: name: socket-dir - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - -v=5 - --csi-address=/csi/csi.sock - --feature-gates=Topology=true + # end csi-provisioner args securityContext: # This is necessary only for systems with SELinux, where # non-privileged sidecar containers cannot access unix domain socket @@ -338,7 +340,7 @@ spec: name: socket-dir - name: csi-resizer - image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0 + image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0 args: - -v=5 - -csi-address=/csi/csi.sock @@ -352,7 +354,7 @@ spec: name: socket-dir - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1 args: - -v=5 - --csi-address=/csi/csi.sock diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml index 7253a70a9d..02e5e8d7c1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml @@ -11,6 +11,7 @@ apiVersion: v1 kind: Service metadata: name: hostpath-service + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -30,6 +31,7 @@ kind: StatefulSet apiVersion: apps/v1 metadata: name: csi-hostpath-socat + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -64,7 +66,9 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: socat - image: docker.io/alpine/socat:1.7.4.3-r0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 + command: + - socat args: - 
tcp-listen:10000,fork,reuseaddr - unix-connect:/csi/csi.sock diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml index 7684aea78c..b17687c617 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-attacher - image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0 + image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0 args: - --v=5 - --csi-address=$(ADDRESS) diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml index 242c0b5aa7..ef85e3170f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-resizer - image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0 + image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml index f788a4a8f2..6718060ec9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1 args: - "--v=5" - "--csi-address=$(ADDRESS)" diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml index 4493deccc0..3035532c5d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - "--csi-address=$(ADDRESS)" # Topology support is needed for the pod rescheduling test @@ -34,7 +34,7 @@ spec: - mountPath: /csi name: socket-dir - name: driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -53,7 +53,7 @@ spec: - mountPath: /registration name: registration-dir - name: mock - image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - "--drivername=mock.storage.k8s.io" - "--nodeid=$(KUBE_NODE_NAME)" diff --git 
a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml index d1aa8ece86..189c17af67 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - "--csi-address=$(ADDRESS)" # Topology support is needed for the pod rescheduling test @@ -35,7 +35,7 @@ spec: - mountPath: /csi name: socket-dir - name: driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -53,7 +53,7 @@ spec: - mountPath: /registration name: registration-dir - name: mock - image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - -v=5 - -nodeid=$(KUBE_NODE_NAME) diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh index ce60b39bc3..122de0d18b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh @@ -137,5 +137,5 @@ done grep -r image: hostpath/hostpath/csi-hostpath-plugin.yaml | while read -r image; do version=$(echo "$image" | sed -e 's/.*:\(.*\)/\1/') image=$(echo "$image" | sed -e 's/.*image: \([^:]*\).*/\1/') - sed -i -e "s;$image:.*;$image:$version;" mock/*.yaml + sed -i '' -e "s;$image:.*;$image:$version;" mock/*.yaml done diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 578a0c0f4b..19113e0412 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -232,7 +232,7 @@ const ( func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) { configs := map[ImageID]Config{} - configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.45"} + configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.47"} configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"} configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"} configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"} @@ -241,8 +241,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"} configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"} configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"} - configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.9-0"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.5.9"} + configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.15-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = 
Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"} diff --git a/vendor/k8s.io/mount-utils/mount_helper_unix.go b/vendor/k8s.io/mount-utils/mount_helper_unix.go index 82210aaa23..1c603dca7a 100644 --- a/vendor/k8s.io/mount-utils/mount_helper_unix.go +++ b/vendor/k8s.io/mount-utils/mount_helper_unix.go @@ -61,7 +61,13 @@ func IsCorruptedMnt(err error) bool { underlyingError = err } - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES || underlyingError == syscall.EHOSTDOWN + return errors.Is(underlyingError, syscall.ENOTCONN) || + errors.Is(underlyingError, syscall.ESTALE) || + errors.Is(underlyingError, syscall.EIO) || + errors.Is(underlyingError, syscall.EACCES) || + errors.Is(underlyingError, syscall.EHOSTDOWN) || + errors.Is(underlyingError, syscall.EWOULDBLOCK) || + errors.Is(underlyingError, syscall.ENODEV) } // MountInfo represents a single line in /proc//mountinfo. diff --git a/vendor/modules.txt b/vendor/modules.txt index c0ffafa2c5..c16000d2c2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -462,10 +462,10 @@ golang.org/x/crypto/salsa20/salsa ## explicit; go 1.18 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/mod v0.12.0 -## explicit; go 1.17 +# golang.org/x/mod v0.14.0 +## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.21.0 +# golang.org/x/net v0.23.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -496,7 +496,6 @@ golang.org/x/sync/singleflight # golang.org/x/sys v0.18.0 ## explicit; go 1.18 golang.org/x/sys/cpu -golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows @@ -538,7 +537,7 @@ golang.org/x/text/width # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.12.0 +# golang.org/x/tools v0.16.1 ## explicit; go 1.18 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver @@ -556,6 +555,7 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal +golang.org/x/tools/internal/versions # google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine/internal @@ -693,7 +693,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.28.4 => k8s.io/api v0.28.4 +# k8s.io/api v0.28.15 => k8s.io/api v0.28.15 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -749,7 +749,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.28.2 => k8s.io/apiextensions-apiserver v0.28.4 +# k8s.io/apiextensions-apiserver v0.28.2 => k8s.io/apiextensions-apiserver v0.28.15 ## explicit; go 1.20 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -761,7 +761,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.28.4 => k8s.io/apimachinery v0.28.4 +# k8s.io/apimachinery v0.28.15 => k8s.io/apimachinery v0.28.15 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality 
k8s.io/apimachinery/pkg/api/errors @@ -823,7 +823,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.28.4 => k8s.io/apiserver v0.28.4 +# k8s.io/apiserver v0.28.15 => k8s.io/apiserver v0.28.15 ## explicit; go 1.20 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -968,7 +968,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.28.4 => k8s.io/client-go v0.28.4 +# k8s.io/client-go v0.28.15 => k8s.io/client-go v0.28.15 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1293,7 +1293,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.28.4 => k8s.io/cloud-provider v0.28.4 +# k8s.io/cloud-provider v0.28.15 => k8s.io/cloud-provider v0.28.15 ## explicit; go 1.20 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1316,7 +1316,7 @@ k8s.io/cloud-provider/options k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.28.4 => k8s.io/component-base v0.28.4 +# k8s.io/component-base v0.28.15 => k8s.io/component-base v0.28.15 ## explicit; go 1.20 k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -1345,13 +1345,13 @@ k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.28.4 => k8s.io/component-helpers v0.28.4 +# k8s.io/component-helpers v0.28.15 => k8s.io/component-helpers v0.28.15 ## explicit; go 1.20 k8s.io/component-helpers/node/topology k8s.io/component-helpers/node/util k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity -# k8s.io/controller-manager v0.28.4 => k8s.io/controller-manager v0.28.4 +# k8s.io/controller-manager v0.28.15 => k8s.io/controller-manager v0.28.15 ## explicit; go 1.20 k8s.io/controller-manager/app k8s.io/controller-manager/config @@ -1368,7 +1368,7 @@ k8s.io/controller-manager/pkg/informerfactory k8s.io/controller-manager/pkg/leadermigration k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options -# k8s.io/csi-translation-lib v0.28.4 => k8s.io/csi-translation-lib v0.28.4 +# k8s.io/csi-translation-lib v0.28.15 => k8s.io/csi-translation-lib v0.28.15 ## explicit; go 1.20 # k8s.io/klog v1.0.0 ## explicit; go 1.12 @@ -1381,7 +1381,7 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kms v0.28.4 => k8s.io/kms v0.28.4 +# k8s.io/kms v0.28.15 => k8s.io/kms v0.28.15 ## explicit; go 1.20 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 @@ -1409,16 +1409,16 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.28.4 +# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.28.15 ## explicit; go 1.20 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.28.4 => k8s.io/kubectl v0.28.4 +# 
k8s.io/kubectl v0.28.15 => k8s.io/kubectl v0.28.15 ## explicit; go 1.20 k8s.io/kubectl/pkg/scale -# k8s.io/kubelet v0.28.4 => k8s.io/kubelet v0.28.4 +# k8s.io/kubelet v0.28.15 => k8s.io/kubelet v0.28.15 ## explicit; go 1.20 k8s.io/kubelet/pkg/apis -# k8s.io/kubernetes v1.28.4 => k8s.io/kubernetes v1.28.4 +# k8s.io/kubernetes v1.28.15 => k8s.io/kubernetes v1.28.15 ## explicit; go 1.20 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1474,7 +1474,7 @@ k8s.io/kubernetes/test/e2e/framework/testfiles k8s.io/kubernetes/test/e2e/testing-manifests k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/image -# k8s.io/mount-utils v0.28.4 => k8s.io/mount-utils v0.28.4 +# k8s.io/mount-utils v0.28.15 => k8s.io/mount-utils v0.28.15 ## explicit; go 1.20 k8s.io/mount-utils # k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 @@ -1523,32 +1523,32 @@ sigs.k8s.io/yaml # github.com/docker/docker => github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb # github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0 # google.golang.org/grpc => google.golang.org/grpc v1.61.0 -# k8s.io/api => k8s.io/api v0.28.4 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.4 -# k8s.io/apimachinery => k8s.io/apimachinery v0.28.4 -# k8s.io/apiserver => k8s.io/apiserver v0.28.4 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.4 -# k8s.io/client-go => k8s.io/client-go v0.28.4 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.4 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.4 -# k8s.io/code-generator => k8s.io/code-generator v0.28.4 -# k8s.io/component-base => k8s.io/component-base v0.28.4 -# k8s.io/component-helpers => k8s.io/component-helpers v0.28.4 -# k8s.io/controller-manager => k8s.io/controller-manager v0.28.4 -# k8s.io/cri-api => k8s.io/cri-api v0.28.4 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.4 -# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.4 +# k8s.io/api => k8s.io/api v0.28.15 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.15 +# k8s.io/apimachinery => k8s.io/apimachinery v0.28.15 +# k8s.io/apiserver => k8s.io/apiserver v0.28.15 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.15 +# k8s.io/client-go => k8s.io/client-go v0.28.15 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.15 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.15 +# k8s.io/code-generator => k8s.io/code-generator v0.28.15 +# k8s.io/component-base => k8s.io/component-base v0.28.15 +# k8s.io/component-helpers => k8s.io/component-helpers v0.28.15 +# k8s.io/controller-manager => k8s.io/controller-manager v0.28.15 +# k8s.io/cri-api => k8s.io/cri-api v0.28.15 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.15 +# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.15 # k8s.io/endpointslice => k8s.io/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20230810203337-add7e14df11e -# k8s.io/kms => k8s.io/kms v0.28.4 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.4 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.4 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.4 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.4 -# k8s.io/kubectl => k8s.io/kubectl v0.28.4 -# k8s.io/kubelet => k8s.io/kubelet v0.28.4 -# k8s.io/kubernetes => k8s.io/kubernetes v1.28.4 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.4 -# k8s.io/metrics => k8s.io/metrics v0.28.4 -# 
k8s.io/mount-utils => k8s.io/mount-utils v0.28.4 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.4 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.4 +# k8s.io/kms => k8s.io/kms v0.28.15 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.15 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.15 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.15 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.15 +# k8s.io/kubectl => k8s.io/kubectl v0.28.15 +# k8s.io/kubelet => k8s.io/kubelet v0.28.15 +# k8s.io/kubernetes => k8s.io/kubernetes v1.28.15 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.15 +# k8s.io/metrics => k8s.io/metrics v0.28.15 +# k8s.io/mount-utils => k8s.io/mount-utils v0.28.15 +# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.15 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.15
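
For reference, a minimal, self-contained sketch (standard library only) of the typed-error pattern that the vendored pkg/volume/util/types hunk above relies on: a dedicated error type is detected with errors.As, so callers can branch on "operation not supported" even after the error has been wrapped, without string matching. The expandVolume caller and its message are illustrative assumptions, not part of the vendored code.

package main

import (
	"errors"
	"fmt"
)

// OperationNotSupported mirrors the error type added in
// k8s.io/kubernetes/pkg/volume/util/types: a plain struct whose
// pointer implements the error interface.
type OperationNotSupported struct {
	msg string
}

func (err *OperationNotSupported) Error() string { return err.msg }

// NewOperationNotSupportedError constructs the typed error.
func NewOperationNotSupportedError(msg string) *OperationNotSupported {
	return &OperationNotSupported{msg: msg}
}

// IsOperationNotSupportedError reports whether err (or anything it wraps)
// is an *OperationNotSupported, using errors.As rather than string checks.
func IsOperationNotSupportedError(err error) bool {
	var target *OperationNotSupported
	return errors.As(err, &target)
}

// expandVolume is a hypothetical caller: it wraps the typed error with %w,
// so the classification above still works after wrapping.
func expandVolume(supported bool) error {
	if !supported {
		return fmt.Errorf("expanding volume: %w",
			NewOperationNotSupportedError("online expansion is not supported by this plugin"))
	}
	return nil
}

func main() {
	err := expandVolume(false)
	if IsOperationNotSupportedError(err) {
		fmt.Println("skipping retry:", err)
	} else if err != nil {
		fmt.Println("transient failure, will retry:", err)
	}
}

The same unwrap-aware idea appears in the mount-utils hunk, which switches IsCorruptedMnt from direct `==` comparisons against syscall errnos to errors.Is, so wrapped mount errors are still classified correctly.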